Dataset schema (one row per file):
- blob_id: string (length 40)
- language: string (1 distinct value)
- repo_name: string (length 5–140)
- path: string (length 5–183)
- src_encoding: string (6 distinct values)
- length_bytes: int64 (12–5.32M)
- score: float64 (2.52–4.94)
- int_score: int64 (3–5)
- detected_licenses: list (length 0–47)
- license_type: string (2 distinct values)
- text: string (length 12–5.32M)
- download_success: bool (1 distinct value)
e46e0749a203eb9b484770abacfc1988a6023237
|
Rust
|
IThawk/rust-project
|
/rust-master/src/test/ui/rfc-2093-infer-outlives/regions-enum-not-wf.rs
|
UTF-8
| 921
| 2.796875
| 3
|
[
"MIT",
"LicenseRef-scancode-other-permissive",
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"NCSA"
] |
permissive
|
// ignore-tidy-linelength
// Various examples of enums whose variants are not well-formed.
#![allow(dead_code)]
trait Dummy<'a> {
type Out;
}
impl<'a, T> Dummy<'a> for T
where T: 'a
{
type Out = ();
}
type RequireOutlives<'a, T> = <T as Dummy<'a>>::Out;
enum Ref1<'a, T> {
Ref1Variant1(RequireOutlives<'a, T>) //~ ERROR the parameter type `T` may not live long enough
}
enum Ref2<'a, T> {
Ref2Variant1,
Ref2Variant2(isize, RequireOutlives<'a, T>), //~ ERROR the parameter type `T` may not live long enough
}
enum RefOk<'a, T:'a> {
RefOkVariant1(&'a T)
}
// This is now well formed. RFC 2093
enum RefIndirect<'a, T> {
RefIndirectVariant1(isize, RefOk<'a,T>)
}
enum RefDouble<'a, 'b, T> { //~ ERROR the parameter type `T` may not live long enough [E0309]
RefDoubleVariant1(&'a RequireOutlives<'b, T>)
//~^ the parameter type `T` may not live long enough [E0309]
}
fn main() { }
| true
|
51e617cad36d20e31ade91ca3f8f546d0f119588
|
Rust
|
1aguna/arrow2
|
/src/bitmap/bitmap_ops.rs
|
UTF-8
| 5,275
| 3.21875
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use std::ops::{BitAnd, BitOr, BitXor, Not};
use crate::buffer::MutableBuffer;
use super::{
utils::{BitChunkIterExact, BitChunksExact},
Bitmap,
};
/// Apply a bitwise operation `op` to four inputs and return the result as a [`Bitmap`].
pub fn quaternary<F>(a1: &Bitmap, a2: &Bitmap, a3: &Bitmap, a4: &Bitmap, op: F) -> Bitmap
where
F: Fn(u64, u64, u64, u64) -> u64,
{
assert_eq!(a1.len(), a2.len());
assert_eq!(a1.len(), a3.len());
assert_eq!(a1.len(), a4.len());
let a1_chunks = a1.chunks();
let a2_chunks = a2.chunks();
let a3_chunks = a3.chunks();
let a4_chunks = a4.chunks();
let rem_a1 = a1_chunks.remainder();
let rem_a2 = a2_chunks.remainder();
let rem_a3 = a3_chunks.remainder();
let rem_a4 = a4_chunks.remainder();
let chunks = a1_chunks
.zip(a2_chunks)
.zip(a3_chunks)
.zip(a4_chunks)
.map(|(((a1, a2), a3), a4)| op(a1, a2, a3, a4));
let buffer = MutableBuffer::from_chunk_iter(
chunks.chain(std::iter::once(op(rem_a1, rem_a2, rem_a3, rem_a4))),
);
let length = a1.len();
Bitmap::from_u8_buffer(buffer, length)
}
/// Apply a bitwise operation `op` to three inputs and return the result as a [`Bitmap`].
pub fn ternary<F>(a1: &Bitmap, a2: &Bitmap, a3: &Bitmap, op: F) -> Bitmap
where
F: Fn(u64, u64, u64) -> u64,
{
assert_eq!(a1.len(), a2.len());
assert_eq!(a1.len(), a3.len());
let a1_chunks = a1.chunks();
let a2_chunks = a2.chunks();
let a3_chunks = a3.chunks();
let rem_a1 = a1_chunks.remainder();
let rem_a2 = a2_chunks.remainder();
let rem_a3 = a3_chunks.remainder();
let chunks = a1_chunks
.zip(a2_chunks)
.zip(a3_chunks)
.map(|((a1, a2), a3)| op(a1, a2, a3));
let buffer =
MutableBuffer::from_chunk_iter(chunks.chain(std::iter::once(op(rem_a1, rem_a2, rem_a3))));
let length = a1.len();
Bitmap::from_u8_buffer(buffer, length)
}
/// Apply a bitwise operation `op` to two inputs and return the result as a [`Bitmap`].
pub fn binary<F>(lhs: &Bitmap, rhs: &Bitmap, op: F) -> Bitmap
where
F: Fn(u64, u64) -> u64,
{
assert_eq!(lhs.len(), rhs.len());
let lhs_chunks = lhs.chunks();
let rhs_chunks = rhs.chunks();
let rem_lhs = lhs_chunks.remainder();
let rem_rhs = rhs_chunks.remainder();
let chunks = lhs_chunks
.zip(rhs_chunks)
.map(|(left, right)| op(left, right));
let buffer =
MutableBuffer::from_chunk_iter(chunks.chain(std::iter::once(op(rem_lhs, rem_rhs))));
let length = lhs.len();
Bitmap::from_u8_buffer(buffer, length)
}
fn unary_impl<F, I>(iter: I, op: F, length: usize) -> Bitmap
where
I: BitChunkIterExact<u64>,
F: Fn(u64) -> u64,
{
let rem = op(iter.remainder());
let iterator = iter.map(op).chain(std::iter::once(rem));
let buffer = MutableBuffer::from_chunk_iter(iterator);
Bitmap::from_u8_buffer(buffer, length)
}
/// Apply a bitwise operation `op` to one input and return the result as a [`Bitmap`].
pub fn unary<F>(lhs: &Bitmap, op: F) -> Bitmap
where
F: Fn(u64) -> u64,
{
let (slice, offset, length) = lhs.as_slice();
if offset == 0 {
let iter = BitChunksExact::<u64>::new(slice, length);
unary_impl(iter, op, lhs.len())
} else {
let iter = lhs.chunks::<u64>();
unary_impl(iter, op, lhs.len())
}
}
/// Creates a new [`Bitmap`] semantically equal to `bitmap` but with an offset equal to `new_offset`.
pub(crate) fn align(bitmap: &Bitmap, new_offset: usize) -> Bitmap {
let length = bitmap.len();
let bitmap: Bitmap = std::iter::repeat(false)
.take(new_offset)
.chain(bitmap.iter())
.collect();
bitmap.slice(new_offset, length)
}
#[inline]
fn and(lhs: &Bitmap, rhs: &Bitmap) -> Bitmap {
binary(lhs, rhs, |x, y| x & y)
}
#[inline]
fn or(lhs: &Bitmap, rhs: &Bitmap) -> Bitmap {
binary(lhs, rhs, |x, y| x | y)
}
#[inline]
fn xor(lhs: &Bitmap, rhs: &Bitmap) -> Bitmap {
binary(lhs, rhs, |x, y| x ^ y)
}
fn eq(lhs: &Bitmap, rhs: &Bitmap) -> bool {
if lhs.len() != rhs.len() {
return false;
}
let mut lhs_chunks = lhs.chunks::<u64>();
let mut rhs_chunks = rhs.chunks::<u64>();
let equal_chunks = lhs_chunks
.by_ref()
.zip(rhs_chunks.by_ref())
.all(|(left, right)| left == right);
if !equal_chunks {
return false;
}
let lhs_remainder = lhs_chunks.remainder_iter();
let rhs_remainder = rhs_chunks.remainder_iter();
lhs_remainder.zip(rhs_remainder).all(|(x, y)| x == y)
}
impl PartialEq for Bitmap {
fn eq(&self, other: &Self) -> bool {
eq(self, other)
}
}
impl<'a, 'b> BitOr<&'b Bitmap> for &'a Bitmap {
type Output = Bitmap;
fn bitor(self, rhs: &'b Bitmap) -> Bitmap {
or(self, rhs)
}
}
impl<'a, 'b> BitAnd<&'b Bitmap> for &'a Bitmap {
type Output = Bitmap;
fn bitand(self, rhs: &'b Bitmap) -> Bitmap {
and(self, rhs)
}
}
impl<'a, 'b> BitXor<&'b Bitmap> for &'a Bitmap {
type Output = Bitmap;
fn bitxor(self, rhs: &'b Bitmap) -> Bitmap {
xor(self, rhs)
}
}
impl Not for &Bitmap {
type Output = Bitmap;
fn not(self) -> Bitmap {
unary(self, |a| !a)
}
}
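// A minimal usage sketch (added for illustration; it assumes `Bitmap` implements
// `FromIterator<bool>`, which the `align` helper above already relies on):
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn operators_are_elementwise() {
        let a: Bitmap = (0..10).map(|i| i % 2 == 0).collect();
        let b: Bitmap = (0..10).map(|i| i % 3 == 0).collect();
        // `&`, `|`, `^` and `!` forward to the `binary`/`unary` kernels defined above.
        let both = &a & &b;
        let expected: Bitmap = (0..10).map(|i| i % 2 == 0 && i % 3 == 0).collect();
        // `PartialEq` for `Bitmap` is implemented in this module.
        assert!(both == expected);
    }
}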
| true
|
c4a9d1946bd5ae85fdad79d91bc49c24c8260085
|
Rust
|
natal/roft
|
/src/edge.rs
|
UTF-8
| 280
| 2.75
| 3
|
[] |
no_license
|
use node::Node;
use vertex::Vertex;
#[deriving(Clone)]
pub struct Edge
{
node_1: @mut Node<Vertex>,
node_2: @mut Node<Vertex>
}
impl Edge
{
pub fn new(n1: @mut Node<Vertex>, n2: @mut Node<Vertex>) -> Edge
{
Edge
{
node_1: n1,
node_2: n2
}
}
}
| true
|
82aacba8b9e09962cc79b6b23bc43d98a74b7c4f
|
Rust
|
tomc1998/rs-coral
|
/src/entity/layout.rs
|
UTF-8
| 4,279
| 3.609375
| 4
|
[] |
no_license
|
//! This module defines components and types relating to layout. For the layout system, the code
//! which is in charge of performing actual layouts, see the layout_system module.
use specs;
use common::{ScreenVec, Constraints};
use entity::*;
/// A layout strategy - when laying out, how should this entity position its children and size
/// itself according to the input constraints?
///
/// If the given entity has the wrong number of children when laying out, an assertion will
/// fail (in debug mode).
#[derive(Copy)]
pub enum LayoutStrategy {
/// Num children: 1
///
/// Center the given child. Child can expand to fill the max constraints of the parent.
Center,
/// Num children: 0
///
/// Maximise this component's size according to the constraints.
Max,
/// Num children: 0
///
/// Size this component to be a lerp between the constraint's min / max.
/// Works separately in x / y.
Proportion(f32, f32),
/// Num children: Any
///
/// Implement a custom layout function for this component and all of its children.
/// Pass a function pointer which takes an entity ID and constraints, plus a reference to
/// layout + children storage, then lays out the given entity + all of its children.
///
/// Also takes a mutable reference to the LayoutSystem, to call back into layout().
///
/// # Example
/// ```
/// fn my_layout(&mut self, root: Entity,
/// c: Constraints,
/// layout_storage: &mut specs::WriteStorage<LayoutComponent>,
/// children_storage: &specs::ReadStorage<ChildrenComponent>,
/// layout_system: &mut LayoutSystem) -> ScreenVec {
///
/// // Get root's layout component and children using the layout and children storage:
/// let root_layout = layout_storage.get(root)
/// .cloned()
/// .expect("Tried to layout an entity without a layout component!");
/// let children = children_storage.get(root)
/// .cloned()
/// .expect("Tried to layout an entity without a children component!").children;
///
/// // First call all the children's layouts (realistically you'd change the constraints
/// // for the children)
/// for child in children {
/// layout_system.layout(child, c, layout_storage, children_storage);
/// }
///
/// // Now layout this entity (notice the lack of .cloned() here, this is a mutable
/// // reference rather than just a value)
/// let root_layout = layout_storage.get_mut(root).unwrap();
/// let final_size = ScreenVec::new(c.max_w, c.max_h); // As big as possible
/// root_layout.size = final_size;
///
/// // Finally, return the final size of this component.
/// return final_size;
/// }
/// ```
Custom(fn(Entity, Constraints,
&mut specs::WriteStorage<LayoutComponent>,
&specs::ReadStorage<ChildrenComponent>,
&mut LayoutSystem) -> ScreenVec),
}
impl Clone for LayoutStrategy {
fn clone(&self) -> Self {
*self
}
}
impl LayoutStrategy {
/// Returns the number of children expected, or none if the amount of children is variable.
pub fn expected_children(&self) -> Option<usize> {
match *self {
LayoutStrategy::Center => Some(1),
LayoutStrategy::Max => Some(0),
LayoutStrategy::Proportion(_,_) => Some(0),
LayoutStrategy::Custom(_) => None,
}
}
}
/// A component defining an entity's layout - the layout strategy, the current offset, and its
/// current size.
#[derive(Clone)]
pub struct LayoutComponent {
pub offset: ScreenVec,
pub size: ScreenVec,
pub strategy: LayoutStrategy,
pub invalidated: bool,
}
impl LayoutComponent {
pub fn new(strategy: LayoutStrategy) -> LayoutComponent {
LayoutComponent {
offset: ScreenVec::new(0,0),
size: ScreenVec::new(0,0),
strategy: strategy,
invalidated: true,
}
}
}
impl specs::Component for LayoutComponent {
type Storage = specs::VecStorage<Self>;
}
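// A minimal construction sketch (illustrative only, using just the API defined above):
#[allow(dead_code)]
fn example_component() -> LayoutComponent {
    // A leaf entity that sizes itself to half of the available space on each axis.
    LayoutComponent::new(LayoutStrategy::Proportion(0.5, 0.5))
}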
| true
|
dc807d063b5bc0fd290289475f90daaf1c1565e1
|
Rust
|
RustStudy/nomicon_book_vec
|
/src/refactor3.rs
|
UTF-8
| 7,933
| 2.984375
| 3
|
[] |
no_license
|
#![feature(allocator_api)]
use std::ptr::{NonNull, self};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::marker::PhantomData;
use std::heap::{Alloc, Layout, Global};
use std::alloc::oom;
#[derive(Debug)]
struct RawVec<T> {
ptr: NonNull<T>,
cap: usize,
}
impl<T> RawVec<T> {
fn new() -> Self {
// !0 is usize::MAX. This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
RawVec { ptr: NonNull::dangling(), cap: cap }
}
fn grow(&mut self) {
unsafe {
let elem_size = mem::size_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the Vec is overfull.
assert!(elem_size != 0, "capacity overflow");
let (new_cap, ptr) = if self.cap == 0 {
let ptr = Global.alloc(Layout::array::<T>(1).unwrap());
(1, ptr)
} else {
let new_cap = 2 * self.cap;
let ptr = Global.realloc(NonNull::from(self.ptr).as_opaque(),
Layout::array::<T>(self.cap).unwrap(),
Layout::array::<T>(new_cap).unwrap().size());
(new_cap, ptr)
};
// If allocate or reallocate fail, oom
let ptr = match ptr {
Ok(ptr) => ptr,
Err(_err) => oom(),
};
self.ptr = NonNull::new_unchecked(ptr.as_ptr() as *mut _);
self.cap = new_cap;
}
}
}
impl<T> Drop for RawVec<T> {
fn drop(&mut self) {
let elem_size = mem::size_of::<T>();
if self.cap != 0 && elem_size != 0 {
unsafe {
println!("drop rawvec");
Global.dealloc(NonNull::from(self.ptr).as_opaque(),
Layout::array::<T>(self.cap).unwrap());
}
}
}
}
#[derive(Debug)]
pub struct Vec<T> {
buf: RawVec<T>,
len: usize,
}
impl<T> Vec<T> {
fn ptr(&self) -> *mut T { self.buf.ptr.as_ptr() }
fn cap(&self) -> usize { self.buf.cap }
pub fn new() -> Self {
Vec { buf: RawVec::new(), len: 0 }
}
pub fn push(&mut self, elem: T) {
if self.len == self.cap() { self.buf.grow(); }
unsafe {
ptr::write(self.ptr().offset(self.len as isize), elem);
}
// Can't fail, we'll OOM first.
self.len += 1;
}
pub fn pop(&mut self) -> Option<T> {
if self.len == 0 {
None
} else {
self.len -= 1;
unsafe {
Some(ptr::read(self.ptr().offset(self.len as isize)))
}
}
}
pub fn insert(&mut self, index: usize, elem: T) {
assert!(index <= self.len, "index out of bounds");
if self.cap() == self.len { self.buf.grow(); }
unsafe {
if index < self.len {
ptr::copy(self.ptr().offset(index as isize),
self.ptr().offset(index as isize + 1),
self.len - index);
}
ptr::write(self.ptr().offset(index as isize), elem);
self.len += 1;
}
}
pub fn remove(&mut self, index: usize) -> T {
assert!(index < self.len, "index out of bounds");
unsafe {
self.len -= 1;
let result = ptr::read(self.ptr().offset(index as isize));
ptr::copy(self.ptr().offset(index as isize + 1),
self.ptr().offset(index as isize),
self.len - index);
result
}
}
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
let iter = RawValIter::new(&self);
let buf = ptr::read(&self.buf);
mem::forget(self);
IntoIter {
iter: iter,
_buf: buf,
}
}
}
pub fn drain(&mut self) -> Drain<T> {
unsafe {
let iter = RawValIter::new(&self);
// this is a mem::forget safety thing. If Drain is forgotten, we just
// leak the whole Vec's contents. Also we need to do this *eventually*
// anyway, so why not do it now?
self.len = 0;
Drain {
iter: iter,
vec: PhantomData,
}
}
}
}
impl<T> Drop for Vec<T> {
fn drop(&mut self) {
println!("drop vec!");
while let Some(_) = self.pop() {}
// allocation is handled by RawVec
}
}
impl<T> Deref for Vec<T> {
type Target = [T];
fn deref(&self) -> &[T] {
unsafe {
::std::slice::from_raw_parts(self.ptr(), self.len)
}
}
}
impl<T> DerefMut for Vec<T> {
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
::std::slice::from_raw_parts_mut(self.ptr(), self.len)
}
}
}
struct RawValIter<T> {
start: *const T,
end: *const T,
}
impl<T> RawValIter<T> {
unsafe fn new(slice: &[T]) -> Self {
RawValIter {
start: slice.as_ptr(),
end: if mem::size_of::<T>() == 0 {
((slice.as_ptr() as usize) + slice.len()) as *const _
} else if slice.len() == 0 {
slice.as_ptr()
} else {
slice.as_ptr().offset(slice.len() as isize)
}
}
}
}
impl<T> Iterator for RawValIter<T> {
type Item = T;
fn next(&mut self) -> Option<T> {
if self.start == self.end {
None
} else {
unsafe {
let result = ptr::read(self.start);
self.start = if mem::size_of::<T>() == 0 {
(self.start as usize + 1) as *const _
} else {
self.start.offset(1)
};
Some(result)
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let elem_size = mem::size_of::<T>();
let len = (self.end as usize - self.start as usize)
/ if elem_size == 0 { 1 } else { elem_size };
(len, Some(len))
}
}
impl<T> DoubleEndedIterator for RawValIter<T> {
fn next_back(&mut self) -> Option<T> {
if self.start == self.end {
None
} else {
unsafe {
self.end = if mem::size_of::<T>() == 0 {
(self.end as usize - 1) as *const _
} else {
self.end.offset(-1)
};
Some(ptr::read(self.end))
}
}
}
}
pub struct IntoIter<T> {
_buf: RawVec<T>, // we don't actually care about this. Just need it to live.
iter: RawValIter<T>,
}
impl<T> Iterator for IntoIter<T> {
type Item = T;
fn next(&mut self) -> Option<T> { self.iter.next() }
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<T> DoubleEndedIterator for IntoIter<T> {
fn next_back(&mut self) -> Option<T> { self.iter.next_back() }
}
impl<T> Drop for IntoIter<T> {
fn drop(&mut self) {
for _ in &mut *self {}
}
}
pub struct Drain<'a, T: 'a> {
vec: PhantomData<&'a mut Vec<T>>,
iter: RawValIter<T>,
}
impl<'a, T> Iterator for Drain<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> { self.iter.next_back() }
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
fn next_back(&mut self) -> Option<T> { self.iter.next_back() }
}
impl<'a, T> Drop for Drain<'a, T> {
fn drop(&mut self) {
// pre-drain the iter
for _ in &mut self.iter {}
}
}
fn main(){
let mut v = Vec::new();
v.push(1);
v.push(2);
println!("{:?}", v);
for i in v.iter() {
println!("{:?}", i);
}
}
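// A small added exercise of the remaining API (insert/remove/drain); a sketch for
// illustration, built on the same nightly toolchain the rest of this file requires.
#[cfg(test)]
mod sketch_tests {
    use super::Vec;

    #[test]
    fn insert_remove_drain() {
        let mut v = Vec::new();
        v.push(1);
        v.push(3);
        v.insert(1, 2);                // v is now [1, 2, 3]
        assert_eq!(&*v, &[1, 2, 3]);   // Deref exposes the elements as a slice
        assert_eq!(v.remove(0), 1);    // v is now [2, 3]
        let drained: std::vec::Vec<i32> = v.drain().collect();
        assert_eq!(drained, [2, 3]);
        assert_eq!(v.len, 0);          // drain() empties the Vec up front
    }
}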
| true
|
ac381b01498fe016f137f61341483dabbda366bf
|
Rust
|
jiyilanzhou/chw
|
/std_module/4_trait/3_Borrow.rs
|
UTF-8
| 2,443
| 3.546875
| 4
|
[] |
no_license
|
/*
0. Trait std::borrow::Borrow
a. Source
// Documentation: "https://doc.rust-lang.org/std/borrow/trait.Borrow.html"
Trait std::borrow::Borrow
pub trait Borrow<Borrowed>
where
Borrowed: ?Sized,
{
fn borrow(&self) -> &Borrowed;
}
A trait for borrowing data.
In Rust, it is common to provide different representations of a type for different use
cases. For instance, storage location and management for a value can be specifically chosen
as appropriate for a particular use via pointer types such as Box<T> or Rc<T>. Beyond these
generic wrappers that can be used with any type, some types provide optional facets
providing potentially costly functionality. An example for such a type is String which adds
the ability to extend a string to the basic str. This requires keeping additional
information unnecessary for a simple, immutable string.
These types provide access to the underlying data through references to the type of that
data. They are said to be ‘borrowed as’ that type. For instance, a Box<T> can be borrowed as
T while a String can be borrowed as str.
Types express that they can be borrowed as some type T by implementing Borrow<T>, providing
a reference to a T in the trait’s borrow method. A type is free to borrow as several
different types. If it wishes to mutably borrow as the type – allowing the underlying data
to be modified, it can additionally implement BorrowMut<T>.
Further, when providing implementations for additional traits, it needs to be considered
whether they should behave identical to those of the underlying type as a consequence of
acting as a representation of that underlying type. Generic code typically uses Borrow<T>
when it relies on the identical behavior of these additional trait implementations. These
traits will likely appear as additional trait bounds.
In particular Eq, Ord and Hash must be equivalent for borrowed and owned values:
x.borrow() == y.borrow() should give the same result as x == y.
If generic code merely needs to work for all types that can provide a reference to related
type T, it is often better to use AsRef<T> as more types can safely implement it.
b.
*/
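// A small illustrative sketch of the points above (added; not part of the original notes):
// a lookup that is generic over any key type that can be borrowed as `str`, relying on
// `Eq` and `Hash` agreeing between the borrowed and owned forms.
use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;

fn lookup<'a, K, V>(map: &'a HashMap<K, V>, key: &str) -> Option<&'a V>
where
    K: Borrow<str> + Hash + Eq,
{
    // `HashMap::get` is itself bounded on `K: Borrow<Q>`, which is exactly why a
    // `HashMap<String, V>` can be queried with a plain `&str`.
    map.get(key)
}

fn main() {
    let mut m: HashMap<String, i32> = HashMap::new();
    m.insert("answer".to_string(), 42);
    assert_eq!(lookup(&m, "answer"), Some(&42));
}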
| true
|
faae9304add2d3017ad06e6f45a6475272e3b952
|
Rust
|
eranfu/leetcode
|
/examples/1588_所有奇数长度子数组的和.rs
|
UTF-8
| 788
| 3.953125
| 4
|
[] |
no_license
|
//! [1588. Sum of All Odd Length Subarrays](https://leetcode-cn.com/problems/sum-of-all-odd-length-subarrays/)
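// Approach: the first loop turns `arr` into prefix sums, so arr[i] holds the sum of the
// original arr[0..=i]. For each odd window length `len`, the window ending at index x sums
// to arr[x] - arr[x - len] (arr[len - 1] for the first window), so every window is added
// in O(1) and the whole solution runs in O(n^2).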
impl Solution {
pub fn sum_odd_length_subarrays(mut arr: Vec<i32>) -> i32 {
for i in 1..arr.len() {
arr[i] += arr[i - 1];
}
let mut sum = 0;
let mut len = 1;
while len <= arr.len() {
sum += arr[len - 1];
for x in len..arr.len() {
sum += arr[x] - arr[x - len];
}
len += 2;
}
sum
}
}
struct Solution;
fn main() {
assert_eq!(Solution::sum_odd_length_subarrays(vec![1, 4, 2, 5, 3]), 58);
assert_eq!(Solution::sum_odd_length_subarrays(vec![1, 2]), 3);
assert_eq!(Solution::sum_odd_length_subarrays(vec![10, 11, 12]), 66);
}
| true
|
9b1b9d6022e2b6b29106ab2a7c7986cde5880277
|
Rust
|
zmarcantel/epiphone
|
/src/error.rs
|
UTF-8
| 4,981
| 3
| 3
|
[] |
no_license
|
use bincode;
use futures;
use proto::{MessageType};
use std::{io, fmt, error};
use std::error::Error as StdError;
#[derive(Debug)]
pub enum ErrorKind {
Deframe,
SolveError,
SolveFail,
UnexpectedType(Vec<MessageType>, MessageType),
}
impl ErrorKind {
pub fn as_str(&self) -> &str {
match *self {
ErrorKind::Deframe => { "failed to deframe message" }
ErrorKind::SolveError => { "error while solving challenge" }
ErrorKind::SolveFail => { "failed to solve challenge" }
ErrorKind::UnexpectedType(_, _) => { "unexpected message type" }
}
}
}
impl fmt::Display for ErrorKind {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
ErrorKind::UnexpectedType(ref exp, ref got) => {
write!(f, "expected one of {:?}, got {:?}", exp, got)
}
_ => { write!(f, "{}", self.as_str()) }
}
}
}
// TODO: IntoFuture trait
#[derive(Debug)]
pub enum Error {
IO(Option<String>, io::Error),
Serde(Option<String>, Box<bincode::ErrorKind>),
Proto(ErrorKind, String),
Generic(String),
}
impl Error {
pub fn new(err: String) -> Error {
Error::Generic(err)
}
pub fn io(desc: Option<String>, err: io::Error) -> Error {
Error::IO(desc, err)
}
pub fn io_other(err: String) -> Error {
Error::IO(None, io::Error::new(io::ErrorKind::Other, err))
}
pub fn serde(desc: Option<String>, err: Box<bincode::ErrorKind>) -> Error {
Error::Serde(desc, err)
}
//
// proto error kinds
//
pub fn proto(kind: ErrorKind, err: String) -> Error {
Error::Proto(kind, err)
}
pub fn deframe(err: String) -> Error {
Error::Proto(ErrorKind::Deframe, err)
}
pub fn solve_err(err: String) -> Error {
Error::Proto(ErrorKind::SolveError, err)
}
pub fn solve_fail(err: String) -> Error {
Error::Proto(ErrorKind::SolveFail, err)
}
pub fn unexpected(allow: Vec<MessageType>, got: MessageType, err: String) -> Error {
Error::Proto(ErrorKind::UnexpectedType(allow, got), err)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::IO(ref d, ref e) => {
if let Some(ref desc) = *d {
desc.as_str()
} else {
e.description()
}
}
Error::Serde(ref d, ref e) => {
if let Some(ref desc) = *d {
desc.as_str()
} else {
e.description()
}
}
Error::Proto(ref d, _) => {
d.as_str()
}
Error::Generic(ref e) => { e.as_str() }
}
}
fn cause(&self) -> Option<&StdError> {
match *self {
Error::IO(_, ref e) => { Some(e) }
Error::Serde(_, ref e) => { Some(e) }
Error::Proto(_, _) | Error::Generic(_) => { None }
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Error::IO(ref d, ref e) => {
if d.is_some() {
write!(f, "{}: {}", d.as_ref().unwrap(), e.description())
} else {
write!(f, "{}", e.description())
}
}
Error::Serde(ref d, ref e) => {
if d.is_some() {
write!(f, "{}: {}", d.as_ref().unwrap(), e.description())
} else {
write!(f, "{}", e.description())
}
}
Error::Proto(ref d, ref e) => {
match *d {
ErrorKind::UnexpectedType(_, _) => {
write!(f, "{}: {}", e, d)
}
_ => {
write!(f, "{}: {}", d, e)
}
}
}
Error::Generic(ref e) => {
write!(f, "{}", e.as_str())
}
}
}
}
impl From<String> for Error {
fn from(err: String) -> Error {
Error::new(err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::io(None, err)
}
}
impl From<futures::Canceled> for Error {
fn from(_: futures::Canceled) -> Error {
Error::io(None, io::Error::new(io::ErrorKind::Other, "future was canceled"))
}
}
impl From<Box<bincode::ErrorKind>> for Error {
fn from(err: Box<bincode::ErrorKind>) -> Error {
Error::serde(None, err)
}
}
impl Into<io::Error> for Error {
fn into(self) -> io::Error {
match self {
Error::IO(_, e) => {
e
}
_ => {
io::Error::new(io::ErrorKind::Other, self)
}
}
}
}
| true
|
7213977e2e5a7b1b349fe1e09eb9ff18a752a302
|
Rust
|
Davidson-Souza/rust
|
/lamport/src/main.rs
|
UTF-8
| 2,512
| 3.0625
| 3
|
[] |
no_license
|
extern crate sha256;
extern crate rand;
use rand::Rng;
#[allow(unused)]
#[allow(dead_code)]
#[derive(Debug)]
struct LamportPrivkey {
n_length: u8,
priv_key: Vec<u128>,
}
impl LamportPrivkey {
fn new() -> LamportPrivkey {
LamportPrivkey {
n_length:16,
priv_key: Vec::new(),
}
}
fn get(mut self) -> LamportPrivkey {
for _ in 0..16 {
let secret_number:u128 = rand::thread_rng().gen_range(1..101);
self.priv_key.push(secret_number);
}
self
}
}
#[derive(Debug)]
pub struct LamportPubKey {
n_length: u8,
pub_key: Vec<String>,
}
impl LamportPubKey {
fn new() -> LamportPubKey {
LamportPubKey {
n_length:8,
pub_key: Vec::new(),
}
}
fn derive(priv_key: &LamportPrivkey) -> LamportPubKey {
let mut pub_key = LamportPubKey {
n_length:8,
pub_key: Vec::new(),
};
for i in 0..priv_key.n_length as usize {
let key = sha256::digest_bytes(&priv_key.priv_key[i].to_le_bytes());
pub_key.pub_key.push(key);
}
pub_key
}
}
#[derive(Debug)]
pub struct LamportSig {
n_length: u8,
pub_key: LamportPubKey,
sig: Vec<u128>
}
impl LamportSig {
fn sign(priv_key: LamportPrivkey, data: &u8) -> LamportSig {
let pub_key = LamportPubKey::derive(&priv_key);
let mut lamport_sig = LamportSig {
n_length: 8,
pub_key: pub_key,
sig: Vec::new(),
};
for i in 0..(lamport_sig.n_length) {
lamport_sig.sig.push(priv_key.priv_key[(if (data & 1) == 1 {2*i} else {(2*i) + 1} as usize)]);
};
lamport_sig
}
fn verify(sig: &LamportSig, data: &u8) -> bool {
let mut verified: bool = true;
let mut index = 0;
for i in sig.sig.iter() {
let hash = sha256::digest_bytes(&i.to_le_bytes());
if hash != sig.pub_key.pub_key[if(data & 1) == 1 { index } else {index + 1} as usize] {
verified = false;
break;
}
index+= 2;
};
verified
}
}
fn main() {
let key = LamportPrivkey::new(); // Create a lamport key
let key = key.get();
println!("{:?}", LamportPubKey::derive(&key));
let data = 1;
let sig = LamportSig::sign(key, &data); // Create sig
println!("{:?}", sig);
println!("{}", LamportSig::verify(&sig, &data));
}
| true
|
cb41763cfe230a74db95bd2bdfe59d95c2ce7717
|
Rust
|
stackcats/leetcode
|
/algorithms/medium/combination_sum_iii.rs
|
UTF-8
| 712
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
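// Backtracking over the candidates 1..=9: `curr_arr` is the combination built so far and
// `curr_sum` its running total. A combination is recorded once exactly `k` numbers sum to `n`;
// a branch is pruned as soon as the sum exceeds `n` or more than `k` numbers have been picked.
// Recursing on `&arr[i + 1..]` keeps candidates strictly increasing, so no set is produced twice.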
fn dfs(
arr: &[i32],
k: usize,
n: i32,
curr_sum: i32,
curr_arr: &mut Vec<i32>,
ans: &mut Vec<Vec<i32>>,
) {
if curr_sum == n && k == curr_arr.len() {
ans.push(curr_arr.clone());
return;
}
if curr_sum > n || k < curr_arr.len() {
return;
}
for i in 0..arr.len() {
let mut cp = curr_arr.clone();
cp.push(arr[i]);
dfs(&arr[i + 1..], k, n, curr_sum + arr[i], &mut cp, ans);
}
}
impl Solution {
pub fn combination_sum3(k: i32, n: i32) -> Vec<Vec<i32>> {
let arr: Vec<i32> = (1..=9).collect();
let mut ans = Vec::new();
dfs(&arr, k as usize, n, 0, &mut vec![], &mut ans);
ans
}
}
| true
|
900f621fd8a1789414d3682e45d79e2ea5ec95c1
|
Rust
|
CedricCouton/ldap3
|
/examples/search_timeout.rs
|
UTF-8
| 815
| 2.546875
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
extern crate ldap3;
use std::error::Error;
use std::time::Duration;
use ldap3::{LdapConn, LdapConnSettings, Scope, SearchEntry};
fn main() {
match do_search() {
Ok(_) => (),
Err(e) => println!("{:?}", e),
}
}
fn do_search() -> Result<(), Box<dyn Error>> {
let ldap = LdapConn::with_settings(
LdapConnSettings::new().set_conn_timeout(Duration::from_secs(5)),
"ldap://localhost:2389")?;
let (rs, res) = ldap
.with_timeout(Duration::from_secs(5))
.search(
"ou=Places,dc=example,dc=org",
Scope::Subtree,
"(&(objectClass=locality)(l=man*))",
vec!["l"]
)?.success()?;
println!("Result: {:?}", res);
for entry in rs {
println!("{:?}", SearchEntry::construct(entry));
}
Ok(())
}
| true
|
4933a7dc7f625f4eb1d5f06dafc5a938ec86ffb4
|
Rust
|
WhoisDavid/advent2019
|
/src/bin/day21.rs
|
UTF-8
| 2,198
| 2.78125
| 3
|
[] |
no_license
|
use advent2019::intcode;
use advent2019::{get_input, AdventResult};
fn main() -> AdventResult<()> {
let program = &get_input::<isize>(21)?.first_row();
solve_part1(program);
solve_part2(program);
Ok(())
}
fn solve_part1(input: &[isize]) {
// NOTE: a jump is 4 tiles long
let springdroid_code = &[
// (No tile ahead = Jump)
"NOT A J", // J = !A
// OR (No tile in 2)
"NOT B T", // T = !B
"OR T J", // J = J || T = !A || !B
// OR (No tile in 3)
"NOT C T", // T = !C
"OR T J", // J = J || T = !A || !B || !C
// AND (tile in 4 = landing)
"AND D J", // J = D && J = D && (!A || !B || !C)
"WALK", // run command
"",
];
run_springdroid(input, springdroid_code)
}
fn solve_part2(input: &[isize]) {
// NOTE: a jump is 4 tiles long
let springdroid_code = &[
// (No tile ahead = Jump)
"NOT A J", // J = !A
// OR (No tile in 2)
"NOT B T", // T = !B
"OR T J", // J = J || T = !A || !B
// OR (No tile in 3)
"NOT C T", // T = !C
"OR T J", // J = J || T = !A || !B || !C
// AND (tile in 4 = landing)
"AND D J", // J = D && J = D && (!A || !B || !C) <= Same as Part 1
// AND (tile in 5 [= tile after landing] OR tile in 8 [= second jump landing])
"NOT E T", // T = !E
"NOT T T", // T = E
"OR H T", // T = E || H
"AND T J", // J = J && T = part1 && (E || H)
"RUN", // run command
"",
];
run_springdroid(input, springdroid_code)
}
fn run_springdroid(input: &[isize], springdroid_code: &[&str]) {
let springdroid_intcode_input: Vec<isize> = springdroid_code
.join("\n")
.chars()
.map(|c| c as u8 as isize)
.collect();
let res = intcode::run_program(input, &springdroid_intcode_input);
println!(
"Springdroid output:\n{}",
res.iter()
.flat_map(|c| if 0 <= *c && *c < 256 {
vec![*c as u8 as char]
} else {
c.to_string().chars().collect::<Vec<_>>()
})
.collect::<String>()
);
}
| true
|
856ab6bd05a0ec90cbd27311acd9640a0b648163
|
Rust
|
kentfredric/grease
|
/src/app/util/parse_atom/atom.rs
|
UTF-8
| 1,098
| 2.828125
| 3
|
[] |
no_license
|
use crate::atom::Atom;
use clap::{App, Arg, ArgMatches, Error, SubCommand};
pub(crate) const NAME: &str = "atom";
pub(crate) const ABOUT: &str =
"Validate/Parse a category/package-version set";
pub(crate) fn subcommand<'x, 'y>() -> App<'x, 'y> {
SubCommand::with_name(NAME).about(ABOUT).arg(
Arg::with_name("ATOM")
.help("The name of a category/package-version set to parse")
.required(true)
.takes_value(true)
.multiple(true)
.empty_values(false),
)
}
pub(crate) fn run(command: &ArgMatches<'_>) -> Result<(), Error> {
let atoms: Vec<&str> = command.values_of("ATOM").unwrap().collect();
for i in atoms {
let p = i.parse::<Atom>().unwrap();
match p.revision() {
Some(r) => println!(
"{} {} {} {}",
p.category(),
p.package(),
p.version(),
r
),
None => {
println!("{} {} {}", p.category(), p.package(), p.version())
},
}
}
Ok(())
}
| true
|
d8542c7a869570f5aaeeb5bce5073b84db04b98b
|
Rust
|
dipique/rust_book
|
/16_concurrency/src/main.rs
|
UTF-8
| 1,706
| 3.203125
| 3
|
[] |
no_license
|
use std::thread;
use std::time::Duration;
mod message_passing;
mod shared_state_concurrency;
mod extensible_concurrency;
fn main() {
use_join_handles();
move_closures();
message_passing::run();
shared_state_concurrency::run();
extensible_concurrency::run();
}
fn use_join_handles() {
// where in C# we could use a task to await, rust has a joinhandle
let handle = thread::spawn(|| {
for i in 1..10 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
// wait for threads to complete
handle.join().unwrap(); // blocks current thread until this thread terminates
}
fn basic_threads() {
thread::spawn(|| {
for i in 1..10 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
//forces thread to stop execution, allowing another
// thread to run
}
});
for i in 1..5 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
// when the main thread closes, all related threads will also
// close even if they haven't completed
}
fn move_closures() {
let v = vec![1,2,3];
let handle = thread::spawn(move || { // force closure to capture (take ownership of) values
println!("Here's a vector: {:?}", v);
});
handle.join().unwrap(); // this also returns a result, which is the result of the closure--again, like task/promise
}
| true
|
42091e81d96e1795b6db5427e0849e4b0c3069cd
|
Rust
|
jbg/rkvm
|
/certificate-gen/src/main.rs
|
UTF-8
| 3,583
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
use anyhow::{Context, Error};
use std::env;
use std::fmt::Write as _;
use std::io::Write;
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use structopt::StructOpt;
use tempfile::NamedTempFile;
fn run(
identity_path: &Path,
certificate_path: &Path,
key_path: &Path,
dns_names: &[String],
ip_addresses: &[IpAddr],
) -> Result<(), Error> {
if dns_names.is_empty() && ip_addresses.is_empty() {
return Err(anyhow::anyhow!(
"No DNS names nor IP addresses were provided"
));
}
let mut config = "[req]
prompt = no
default_bits = 2048
distinguished_name = req_distinguished_name
req_extensions = req_ext
x509_extensions = v3_req
[req_distinguished_name]
commonName = rkvm
countryName = CZ
localityName = rkvm
organizationName = rkvm
organizationalUnitName = IT
stateOrProvinceName = rkvm
emailAddress = nowhere@example.com
[req_ext]
subjectAltName = @alt_names
[v3_req]
subjectAltName = @alt_names
[alt_names]"
.to_owned();
for (i, name) in dns_names.iter().enumerate() {
write!(config, "\nDNS.{} = {}", i + 1, name)?;
}
for (i, address) in ip_addresses.iter().enumerate() {
write!(config, "\nIP.{} = {}", i + 1, address)?;
}
let mut file = NamedTempFile::new().context("Failed to open config file")?;
file.write_all(config.as_bytes())
.context("Failed to write to config file")?;
let openssl = env::var_os("OPENSSL").unwrap_or_else(|| "openssl".to_owned().into());
let code = Command::new(&openssl)
.arg("req")
.arg("-sha256")
.arg("-x509")
.arg("-nodes")
.arg("-days")
.arg("365")
.arg("-newkey")
.arg("rsa:2048")
.arg("-keyout")
.arg(key_path)
.arg("-out")
.arg(certificate_path)
.arg("-config")
.arg(file.path())
.status()
.context("Failed to launch OpenSSL")?
.code();
if code != Some(0) {
return Err(anyhow::anyhow!("OpenSSL exited unsuccessfully"));
}
let code = Command::new(&openssl)
.arg("pkcs12")
.arg("-export")
.arg("-out")
.arg(identity_path)
.arg("-inkey")
.arg(key_path)
.arg("-in")
.arg(certificate_path)
.status()
.context("Failed to launch OpenSSL")?
.code();
if code != Some(0) {
return Err(anyhow::anyhow!("OpenSSL exited unsuccessfully"));
}
Ok(())
}
#[derive(StructOpt)]
#[structopt(
name = "rkvm-certificate-gen",
about = "A tool to generate certificates to use with rkvm"
)]
struct Args {
#[structopt(help = "Path to output identity file (PKCS12 archive)")]
identity_path: PathBuf,
#[structopt(help = "Path to output certificate file (PEM file)")]
certificate_path: PathBuf,
#[structopt(help = "Path to output key file (PEM file)")]
key_path: PathBuf,
#[structopt(
long,
short,
help = "List of DNS names to be used, can be empty if at least one IP address is provided"
)]
dns_names: Vec<String>,
#[structopt(
long,
short,
help = "List of IP addresses to be used, can be empty if at least one DNS name is provided"
)]
ip_addresses: Vec<IpAddr>,
}
fn main() {
let args = Args::from_args();
if let Err(err) = run(
&args.identity_path,
&args.certificate_path,
&args.key_path,
&args.dns_names,
&args.ip_addresses,
) {
println!("Error: {}", err);
process::exit(1);
}
}
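// Illustrative invocation (the paths and address below are made up):
//
//     rkvm-certificate-gen identity.p12 certificate.pem key.pem --ip-addresses 192.168.0.10
//
// The OpenSSL binary is taken from the OPENSSL environment variable, falling back to
// `openssl` on the PATH.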
| true
|
d3e7201045bdedf63d9a26862b19291d007c45f5
|
Rust
|
curlywurlycraig/rust-learning
|
/panic_guess/src/guess.rs
|
UTF-8
| 414
| 3.65625
| 4
|
[] |
no_license
|
pub struct Guess {
value: i32
}
#[derive(Debug)]
pub enum GuessErr {
TooLow,
TooHigh
}
impl Guess {
pub fn new(value: i32) -> Result<Self, GuessErr> {
if value < 0 {
return Err(GuessErr::TooLow);
} else if value > 100 {
return Err(GuessErr::TooHigh);
}
Ok(Guess { value })
}
pub fn value(&self) -> i32 {
self.value
}
}
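// A small usage sketch (added for illustration): values outside 0..=100 are rejected.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn bounds_are_enforced() {
        assert!(matches!(Guess::new(-1), Err(GuessErr::TooLow)));
        assert!(matches!(Guess::new(101), Err(GuessErr::TooHigh)));
        assert_eq!(Guess::new(50).unwrap().value(), 50);
    }
}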
| true
|
d258095c9d225b703889bf79d5551d06fb508781
|
Rust
|
kolen/rustzx
|
/src/emulator/loaders/mod.rs
|
UTF-8
| 1,436
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
//! Module contains different media loaders
mod sna;
mod tap;
pub use self::sna::*;
pub use self::tap::*;
use emulator::Emulator;
use std::convert::AsRef;
use std::path::Path;
/// Loads file into emulator instance, auto-detecting file type and
/// executing appropriate action depending on type. For example, for
/// tape images it inserts tape, for snapshots it restores snapshots.
pub fn load_file_autodetect(emulator: &mut Emulator, file: impl AsRef<Path>) {
let extension = file
.as_ref()
.extension()
.and_then(|os_str| os_str.to_str())
.map(|s| s.to_lowercase());
match extension {
Some(ref s) if s == "sna" => {
load_sna(emulator, file)
}
Some(ref s) if s == "tap" => {
emulator.controller.tape.insert(file.as_ref());
}
_ => (),
}
}
#[cfg(test)]
mod tests {
use super::*;
use settings::RustzxSettings;
use std::path::PathBuf;
#[test]
fn load_file_autodetect_load_tap() {
let path: PathBuf = [env!("CARGO_MANIFEST_DIR"), "test", "tapes", "simple.tap"]
.iter()
.collect();
let settings = RustzxSettings::new();
let mut emulator = Emulator::new(&settings);
assert_eq!(emulator.controller.tape.block_byte(0), None);
load_file_autodetect(&mut emulator, &path);
assert_eq!(emulator.controller.tape.block_byte(0), Some(0));
}
}
| true
|
89a29fce7935e548cd67c74d7b0ee9f47cea6dbd
|
Rust
|
Gbps/steamworks-rs
|
/src/gc.rs
|
UTF-8
| 19,143
| 2.546875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use super::*;
use pretty_hex::*;
#[cfg(test)]
use serial_test_derive::serial;
use byteorder::{LittleEndian, WriteBytesExt, ReadBytesExt};
pub type GCResult<T> = Result<T, sys::EGCResults>;
pub struct GC<Manager> {
pub(crate) gc: *mut sys::ISteamGameCoordinator,
pub(crate) _inner: Arc<Inner<Manager>>,
}
unsafe impl <Manager> Send for GC<Manager> {}
unsafe impl <Manager> Sync for GC<Manager> {}
impl <Manager> Clone for GC<Manager> {
fn clone(&self) -> Self {
Self {
gc: self.gc,
_inner: self._inner.clone()
}
}
}
/// Represents the properties of a received message
#[derive(Debug)]
pub struct RecvMessageProperties {
/// Raw type of the message. If the MSB is set, this message is a proto message
msg_type: u32,
/// The size of the message buffer
msg_size: u32
}
#[repr(C)]
struct IGameCoordinator_vtable {
/// virtual EGCResults SendMessage( uint32 unMsgType, const void *pubData, uint32 cubData ) = 0;
#[cfg(all(windows, target_pointer_width = "32"))]
send_message: extern "thiscall" fn(c: *mut sys::ISteamGameCoordinator, unMsgType: u32, pubData: *const u8, cubData: u32) -> sys::EGCResults,
#[cfg(not(all(windows, target_pointer_width = "32")))]
send_message: extern "C" fn(c: *mut sys::ISteamGameCoordinator, unMsgType: u32, pubData: *const u8, cubData: u32) -> sys::EGCResults,
/// virtual bool IsMessageAvailable( uint32 *pcubMsgSize ) = 0;
#[cfg(all(windows, target_pointer_width = "32"))]
is_message_available: extern "thiscall" fn(c: *mut sys::ISteamGameCoordinator, pcubMsgSize: *mut u32) -> bool,
#[cfg(not(all(windows, target_pointer_width = "32")))]
is_message_available: extern "C" fn(c: *mut sys::ISteamGameCoordinator, pcubMsgSize: *mut u32) -> bool,
/// virtual EGCResults RetrieveMessage( uint32 *punMsgType, void *pubDest, uint32 cubDest, uint32 *pcubMsgSize ) = 0;
#[cfg(all(windows, target_pointer_width = "32"))]
retrieve_message: extern "thiscall" fn(c: *mut sys::ISteamGameCoordinator, punMsgType: *mut u32, pubDest: *mut u8, cubDest: u32, pcubMsgSize: *mut u32) -> sys::EGCResults,
#[cfg(not(all(windows, target_pointer_width = "32")))]
retrieve_message: extern "C" fn(c: *mut sys::ISteamGameCoordinator, punMsgType: *mut u32, pubDest: *mut u8, cubDest: u32, pcubMsgSize: *mut u32) -> sys::EGCResults,
}
impl <Manager> GC<Manager> {
/// Get the vtable for ISteamGameCoordinator
fn get_vtable(&self) -> *const IGameCoordinator_vtable
{
unsafe {
debug_assert!(!self.gc.is_null());
let vtable_ref = self.gc as *const *const IGameCoordinator_vtable;
*vtable_ref
}
}
/// Send an encoded message to the GC. Returns Ok(()) if the message was sent successfully, otherwise Err(EGCResults)
/// Automatically prepends the 8-byte GC header
pub fn send_message(&self, msg_type: u32, msg_data: &[u8]) -> GCResult<()>
{
// Write out the header to a temp buffer before sending off
let mut temp_vec: Vec<u8> = Vec::with_capacity(msg_data.len() + 8);
temp_vec.write_u32::<LittleEndian>(msg_type).unwrap();
// no protobuf header (yet)
temp_vec.write_u32::<LittleEndian>(0).unwrap();
temp_vec.extend_from_slice(msg_data);
unsafe {
// Call into ISteamGameCoordinator to send the message
let res = ((*self.get_vtable()).send_message)(self.gc, msg_type, temp_vec.as_ptr(), temp_vec.len() as u32);
if res == sys::EGCResults::k_EGCResultOK {
Ok(())
} else {
Err(res)
}
}
}
/// Receive a message at the head of the queue. If Ok(), a message is written to out_vec and the properties of the message are returned.
/// If the vector does not have enough capacity to hold the message, the vector is grown to fit the message and retried.
/// It is preferable to use the message size returned from the GCMessageAvailable callback to reduce allocations
pub fn recv_message(&self, out_vec: &mut Vec<u8>) -> GCResult<RecvMessageProperties>
{
// if not initialized, resize to a 'reasonable' size
if out_vec.capacity() == 0
{
out_vec.reserve(4096);
}
// Try to write out to the current vector
unsafe {
let mut props = RecvMessageProperties {
msg_type: 0,
msg_size: 0
};
// keep trying until we get the buffer size right
loop {
// attempt to receive the message
let res = ((*self.get_vtable()).retrieve_message)(self.gc, &mut props.msg_type, out_vec.as_mut_ptr(), out_vec.capacity() as u32, &mut props.msg_size);
// if we got the message into the vector
if res == sys::EGCResults::k_EGCResultOK {
// ensure we return the length property of the vector properly
out_vec.set_len(props.msg_size as usize);
// and return the properties of the message
return Ok(props)
} else {
// otherwise, did we pass a vector that was too small?
if res == sys::EGCResults::k_EGCResultBufferTooSmall {
// reserve more space and try again
// notice that RetrieveMessage knows the correct size of the message but will NOT return you
// that value until you pass a buffer large enough... so we just have to keep growing and hope it works
// stupid stupid stupid api design
out_vec.reserve(out_vec.capacity() * 2);
} else {
// otherwise we actually have a real error
return Err(res)
}
}
}
}
}
/// Is there a gc message available in the queue? Returns Some(message_size).
pub fn is_message_available(&self) -> Option<u32> {
unsafe {
let mut out_int: u32 = 0;
let res = ((*self.get_vtable()).is_message_available)(self.gc, &mut out_int);
if res {
Some(out_int)
}else{
None
}
}
}
}
/// Describes an entry in the message queue
#[derive(Debug)]
pub struct GCMessageQueueEntry {
/// Properties of the message, such as length and type
pub props: RecvMessageProperties,
/// The buffer containing the optional header, if present
pub header: Option<Vec<u8>>,
/// The buffer containing the actual message's contents
pub body: Vec<u8>,
/// If true, an error happened while receiving and the other fields are not valid
error: bool
}
type PacketCallbacksTable = Arc<Mutex<HashMap<u32, Box<dyn FnMut(GCMessageQueueEntry) + Send + 'static>>>>;
/// A high level message queue to assist in receiving GC messages easily
pub struct GCMessageQueue<Manager> {
/// A reference to the gc instance
client: Client<Manager>,
/// A reference to the callback registered for the message queue
callback: Option<CallbackHandle<Manager>>,
/// Hashmap which dispatches message id types to callbacks that service them
packet_callbacks: PacketCallbacksTable
}
/// Handle representing a callback that will be dropped automatically
pub struct PktCallbackHandle {
msg_type: u32,
packet_callbacks: PacketCallbacksTable
}
/// Implements automatic drop for install_callback_handle
impl Drop for PktCallbackHandle {
fn drop(&mut self) {
match self.packet_callbacks.lock()
{
// remove the callback from the table if the reference is valid
Ok(mut cbs) => { cbs.remove(&self.msg_type); },
// dangling reference to a dropped queue, do nothing.
Err(_) => {}
}
()
}
}
impl<Manager: 'static> GCMessageQueue<Manager> where Manager: crate::Manager {
/// Create a new message queue for a client
pub fn new(client: Client<Manager>) -> Self {
// create the queue
let mut obj = Self {
client,
callback: None,
packet_callbacks: Arc::new(Mutex::new(HashMap::new()))
};
// register a callback to service this queue
obj.start_recv();
return obj
}
/// Begin receiving GC packets into the queue
fn start_recv(&mut self)
{
// get a gc reference
let gc = self.client.gc();
let callbacks_ref = self.packet_callbacks.clone();
let callback = move |v: GCMessageAvailable| {
// receive the message from the queue
let mut buf: Vec<u8> = Vec::with_capacity(v.message_size as usize);
let res = gc.recv_message(&mut buf);
// did we receive a message?
if let Ok(x) = res {
// parse the size of the optional proto header
let mut header_size = &buf[4..8];
let header_size = header_size.read_u32::<LittleEndian>();
let header_size = match header_size {
Err(_) => return,
Ok(x) => x
} as usize;
let mut header: Option<Vec<u8>> = None;
let body: Vec<u8>;
// is there an optional header?
if header_size > 0 {
// read the header separately
let hdr = Vec::from(&buf[8..(8+header_size)]);
header = Some(hdr);
// read the body afterwards
body = Vec::from(&buf[(8+header_size)..]);
}
else {
// otherwise, just read the body since the header is 0
body = Vec::from(&buf[8..]);
}
// okay, let's fire a callback for that type
if let Ok(mut ht) = callbacks_ref.lock()
{
// do we have a callback entry for this type?
let entry = ht.get_mut(&x.msg_type);
if entry.is_some() {
// call the callback passing the data we just received!
let cb = entry.unwrap();
cb(GCMessageQueueEntry {
props: x,
header,
body,
error: false
});
}
}
} else {
// TODO: Error report
return;
}
};
// register the callback and hold a reference to it
self.callback = Some(self.client.register_callback(callback));
}
/// Add a message to send to the queue. Returns true if the message was successfully queued.
/// The message data must NOT include the 8-byte header. This is added automatically.
pub fn send_message(&self, msg_type: u32, msg_data: &[u8]) -> bool
{
let gc = self.client.gc();
let res = gc.send_message(msg_type, msg_data);
if let Ok(_) = res {
true
} else {
false
}
}
/// Establish a permanent callback function to call whenever a packet of a certain type is received
///
/// Only one callback for a specific message type can exist per queue, if one already exists
/// the callback is replaced with this one.
pub fn install_global_callback<C>(&self, msg_type: u32, callback_fn: C)
where C: FnMut(GCMessageQueueEntry) + Send + 'static
{
self.packet_callbacks.lock().unwrap().insert(msg_type, Box::new(callback_fn));
}
/// Establish a temporary callback function to call whenever a packet of a certain type is received.
/// This is used to establish a callback function for a packet type which only exists in the scope
/// of the returned handle.
///
/// Only one callback for a specific message type can exist per queue, if one already exists
/// the callback is replaced with this one.
///
/// Returns a handle object which, when dropped, will remove the callback type specified by msg_type.
/// NOTE: this does not protect against races by other calls to install_handle.
#[must_use]
pub fn install_callback<C>(&self, msg_type: u32, callback_fn: C) -> PktCallbackHandle
where C: FnMut(GCMessageQueueEntry) + Send + 'static
{
let mut guard = self.packet_callbacks.lock().unwrap();
guard.insert(msg_type, Box::new(callback_fn));
// return a handle which will automatically drop the callback type
PktCallbackHandle {
msg_type,
packet_callbacks: self.packet_callbacks.clone()
}
}
/// Removes a previously registered callback by its message type
/// If no callback was registered already for that type, returns false. Otherwise, returns true.
pub fn remove_callback(&self, msg_type: u32) -> bool
{
self.packet_callbacks.lock().unwrap().remove(&msg_type).is_some()
}
}
#[derive(Clone, Debug)]
pub struct GCMessageAvailable {
pub message_size: u32,
}
const CALLBACK_BASE: i32 = 1700;
/// Callback object to track when a GC message is available in the queue
unsafe impl Callback for GCMessageAvailable {
const ID: i32 = CALLBACK_BASE + 1;
const SIZE: i32 = ::std::mem::size_of::<sys::GCMessageAvailable_t>() as i32;
unsafe fn from_raw(raw: *mut libc::c_void) -> Self {
let val = &mut *(raw as *mut sys::GCMessageAvailable_t);
GCMessageAvailable {
message_size: val.m_nMessageSize
}
}
}
#[derive(Clone, Debug)]
pub struct GCMessageFailed {
}
/// Callback object to track when a GC message failed to send/recv
unsafe impl Callback for GCMessageFailed {
const ID: i32 = CALLBACK_BASE + 2;
const SIZE: i32 = ::std::mem::size_of::<sys::GCMessageFailed_t>() as i32;
unsafe fn from_raw(_raw: *mut libc::c_void) -> Self {
GCMessageFailed {
}
}
}
#[cfg(test)]
const CLIENT_HELLO_MESSAGE_ID: u32 = 0x80000000 + 4006;
#[cfg(test)]
const CLIENT_WELCOME_MESSAGE_ID: u32 = 0x80000000 + 4004;
#[test]
#[serial]
fn test_basic() {
// ensure we can connect to a client and create a gc interface
let (client, single) = Client::init().unwrap();
let gc = client.gc();
assert!(!gc.gc.is_null());
println!("ISteamGameCoordinator: {:p}", gc.gc);
// ensure at is_message_available is working correctly
let res = gc.is_message_available();
assert_eq!(None, res);
// register the callback and read messages as they come in
let gc_clone = gc.clone();
let _cb = client.register_callback( move |v: GCMessageAvailable| {
println!("Queued pending gc message: {}", v.message_size);
// receive the message from the queue
let mut dummy: Vec<u8> = Vec::with_capacity(v.message_size as usize);
let res = gc_clone.recv_message(&mut dummy);
if let Ok(x) = res {
println!("--- Received Message ---");
println!("message_type: {}", x.msg_type & 0x7FFFFFFF);
println!("message_size: {}", x.msg_size);
println!("------------------------");
} else {
panic!("GC error on receive, returned: {}", res.unwrap_err() as u32);
}
});
// run some callbacks
for _ in 0 .. 20 {
single.run_callbacks();
::std::thread::sleep(::std::time::Duration::from_millis(50));
}
// send a GC hello
let res = gc.send_message(CLIENT_HELLO_MESSAGE_ID, &[]);
dbg!(res.unwrap());
// run some more callbacks waiting for the GC welcome
for _ in 0 .. 20 {
single.run_callbacks();
::std::thread::sleep(::std::time::Duration::from_millis(50));
}
}
#[test]
#[serial]
fn test_queue() {
// ensure we can connect to a client and create a gc interface
let (client, single) = Client::init().unwrap();
// let steam warm up
::std::thread::sleep(::std::time::Duration::from_millis(1000));
// notify us when the message is successfully received in the other thread
let (sender, receiver) = std::sync::mpsc::channel::<bool>();
// spawn a thread to do send/recv operations
let _ = std::thread::spawn(move || {
let queue = GCMessageQueue::new(client.clone());
// install a callback to respond for the welcome packet
queue.install_global_callback(CLIENT_WELCOME_MESSAGE_ID, move |pkt| {
dbg!(&pkt.props);
// tell the test thread that we were successful
sender.send(true).unwrap();
});
// send a k_EMsgGCClientHello
dbg!(queue.send_message(CLIENT_HELLO_MESSAGE_ID, &[]));
// keep the thread and queue alive
::std::thread::sleep(::std::time::Duration::from_millis(3000));
});
// loop performing callbacks here
for _ in 0 .. 50 {
single.run_callbacks();
::std::thread::sleep(::std::time::Duration::from_millis(50));
// did the packet successfully get received?
if let Ok(_) = receiver.try_recv()
{
// successfully received the message, exit test
return
}
}
panic!("Did not receive GC welcome packet.");
}
#[test]
#[serial]
fn test_callback_handle() {
// ensure we can connect to a client and create a gc interface
let (client, single) = Client::init().unwrap();
// let steam warm up
::std::thread::sleep(::std::time::Duration::from_millis(1000));
// notify us when the message is successfully received in the other thread
let (sender, receiver) = std::sync::mpsc::channel::<bool>();
// spawn a thread to do send/recv operations
let other_thrd = std::thread::spawn(move || {
let queue = GCMessageQueue::new(client.clone());
{
// install a callback to respond for the welcome packet
let _hndl = queue.install_callback(CLIENT_WELCOME_MESSAGE_ID, move |pkt| {
dbg!(&pkt.props);
// tell the test thread that we were successful
sender.send(true).unwrap();
});
// send a k_EMsgGCClientHello
dbg!(queue.send_message(CLIENT_HELLO_MESSAGE_ID, &[]));
// keep the thread and queue alive
::std::thread::sleep(::std::time::Duration::from_millis(1000));
}
// ensure our callback was dropped
assert_eq!(queue.packet_callbacks.lock().unwrap().len(), 0);
});
// loop performing callbacks here
for _ in 0 .. 50 {
single.run_callbacks();
::std::thread::sleep(::std::time::Duration::from_millis(50));
// did the packet successfully get received?
if let Ok(_) = receiver.try_recv()
{
// successfully received the message, exit test
other_thrd.join().unwrap();
return
}
}
panic!("Did not receive GC welcome packet.");
}
| true
|
34953cd8afbac4c2630ac6551befd45b3f5aa5a9
|
Rust
|
tobiasvandriessel/AoC
|
/Day1Part1/src/main.rs
|
UTF-8
| 1,554
| 3.34375
| 3
|
[] |
no_license
|
use std::io;
use std::io::prelude::*;
use std::io::BufReader;
use std::fs::File;
fn main() -> io::Result<()> {
    // Tallies of how often each running frequency has been seen, split by sign.
    let mut negative_seen = [0u8; 200_000];
    let mut positive_seen = [0u8; 200_000];
    let mut freq: i32 = 0;

    // Cycle through the input repeatedly until a frequency repeats.
    loop {
        let f = File::open("input.txt")?;
        let reader = BufReader::new(f);
        for line in reader.lines() {
            let seen = if freq < 0 {
                let neg_freq = -freq;
                negative_seen[neg_freq as usize] += 1;
                negative_seen[neg_freq as usize]
            } else {
                positive_seen[freq as usize] += 1;
                positive_seen[freq as usize]
            };
            if seen > 1 {
                println!("first repeated frequency is {}", freq);
                return Ok(());
            }

            let num: i32 = match line {
                Ok(n) => n.trim().parse().unwrap(),
                Err(e) => {
                    println!("Something went wrong with the line");
                    println!("Error: {}", e);
                    return Ok(());
                }
            };
            freq += num;
        }
    }
}
| true
|
ad1329dd8ada27218bd4f9b7820ec5132e316524
|
Rust
|
twetzel59/fmmc_gui
|
/src/application/window.rs
|
UTF-8
| 2,141
| 3.3125
| 3
|
[
"Unlicense"
] |
permissive
|
//! This module is everything that manages the app's main window
//! and event loop.
use sfml::graphics::{Color, RenderTarget, RenderWindow};
use sfml::system::Vector2f;
use sfml::window::{ContextSettings, Event, mouse, Style, VideoMode};
use widget::Widget;
const COLOR_DEPTH: u32 = 32;
/// The core of a GUI application. Manages a window and polls events,
/// dispatching to your callbacks.
pub struct AppWindow<'s> {
win: RenderWindow,
widgets: Vec<Box<Widget<'s> + 's>>,
}
impl<'s> AppWindow<'s> {
/// Create a new app. It will not start until explicitly started.
/// To create a UI, call this. Then, add your signals and widgets.
/// Finally, call `start()`.
pub fn new(size: (u32, u32), title: &str, decoration: bool) -> AppWindow<'s> {
let win_style = if decoration {
Style::DEFAULT
} else {
Style::NONE
};
AppWindow {
win: RenderWindow::new(VideoMode::new(size.0, size.1, COLOR_DEPTH),
title, win_style, &ContextSettings::default()),
widgets: Vec::new(),
}
}
/// Adds a widget to the UI. Connect signals to the widget first.
pub fn add<T: Widget<'s> + 's>(&mut self, widget: T) {
self.widgets.push(Box::new(widget));
}
/// Launch the main loop!
pub fn start(&mut self) {
'outer: loop {
self.win.clear(&Color::BLACK);
for i in &self.widgets {
i.draw(&mut self.win);
}
self.win.display();
while let Some(e) = self.win.poll_event() {
match e {
Event::Closed => break 'outer,
Event::MouseButtonPressed { button: mouse::Button::Left, x, y } => {
for i in &mut self.widgets {
if i.contains_point(Vector2f::new(x as f32, y as f32)) {
i.signal_manager().handle_click();
}
}
},
_ => {},
}
}
}
}
}
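// A minimal launch sketch (illustrative only; a real UI would `add` concrete `Widget`
// implementations with their signals connected before calling `start`):
#[allow(dead_code)]
fn example_run() {
    let mut app = AppWindow::new((640, 480), "fmmc_gui example", true);
    // app.add(some_widget);
    app.start();
}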
| true
|
57a810219f6ee95391759569f132c35524c0e7b7
|
Rust
|
richardeoin/qrp_rust
|
/src/interleave.rs
|
UTF-8
| 1,229
| 3.234375
| 3
|
[] |
no_license
|
//! Van-der-corput style interleaver for WSJT modes
use bithacks;
///
/// Binary van-der-corput interleave sequence. Max 8-bits
///
pub struct InterleaveSeq {
index: u8,
maximum: u8,
}
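// The iterator walks the raw index 0..=254 and emits `reverse_8(index)` whenever the
// bit-reversed value falls below `maximum`, i.e. it visits 0..maximum in bit-reversed
// (van der Corput) order: 0, 2, 1, 3 for maximum = 4.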
impl Iterator for InterleaveSeq {
type Item = u8;
// Next sequence element
fn next(&mut self) -> Option<u8> {
loop {
if self.index != 0xff {
let reversed: u8 = bithacks::reverse_8(self.index);
self.index += 1;
if reversed < self.maximum {
return Some(reversed)
}
} else {
return None
}
}
}
}
///
/// New InterleaveSeq
///
pub fn interleave_seq(max: u8) -> InterleaveSeq {
InterleaveSeq { index: 0, maximum: max }
}
///
/// Unit tests
///
#[cfg(test)]
mod test {
use super::*;
///
/// Interleaver
///
#[test]
fn check_interleaver() {
for (i,j) in interleave_seq(4).enumerate() {
match i {
0 => assert_eq!(j, 0),
1 => assert_eq!(j, 2),
2 => assert_eq!(j, 1),
3 => assert_eq!(j, 3),
_ => unreachable!("interleave_seq(4) yields exactly 4 values")
}
}
}
}
| true
|
6ad5c2fb1595e6013f380d65855a0a1b8ffdf3a0
|
Rust
|
PacktPublishing/Mastering-Rust
|
/Chapter06/functions-with-borrows-2.rs
|
UTF-8
| 409
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
fn take_the_n(n: &mut u8) {
println!("The n is {}", *n);
*n=10;
}
fn take_the_s(s: &String) {
}
fn take_the_foo(f: &Foo) {
println!("Foo: {:?}", *f);
}
#[derive(Debug)]
struct Foo {}
fn main() {
let mut n = 5;
let s = String::from("string");
let f = Foo {};
    take_the_n(&mut n);
take_the_s(&s);
take_the_foo(&f);
println!("n is {}", n);
println!("s is {}", s);
}
| true
|
df5af57072f5cc2bada3d048e73ec3f380281f12
|
Rust
|
hoangpq/ClojureRS
|
/src/environment.rs
|
UTF-8
| 4,990
| 2.953125
| 3
|
[] |
no_license
|
use crate::namespace::{Namespace, Namespaces};
use crate::rust_core;
use crate::value::{ToValue, Value};
use crate::Symbol;
use crate::repl;
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
// @TODO lookup naming convention
/// Inner value of our environment
/// See Environment for overall purpose
#[derive(Debug, Clone)]
pub struct EnvironmentVal {
curr_ns: Namespace,
namespaces: Namespaces,
}
impl EnvironmentVal {
/// Default main environment
fn new_main_val() -> EnvironmentVal {
EnvironmentVal {
curr_ns: Namespace::new(Symbol::intern("user"), RefCell::new(HashMap::new())),
namespaces: Namespaces(RefCell::new(HashMap::new())),
}
}
}
/// Our environment keeps track of the meaning of things 'right here', relative to where
/// something is at (meaning, a form inside of a let might have a different meaning for
/// the symbol x than a form outside of it, with a let introducing an additional local environment
///
/// Stores our namespaces and our current namespace, which themselves personally store our symbols
/// mapped to values
#[derive(Debug, Clone)]
pub enum Environment {
MainEnvironment(EnvironmentVal),
/// Points to parent environment
/// Introduced by Closures, and by let
LocalEnvironment(Rc<Environment>, RefCell<HashMap<Symbol, Rc<Value>>>),
}
use Environment::*;
impl Environment {
pub fn new_main_environment() -> Environment {
MainEnvironment(EnvironmentVal::new_main_val())
}
pub fn new_local_environment(outer_environment: Rc<Environment>) -> Environment {
LocalEnvironment(outer_environment, RefCell::new(HashMap::new()))
}
pub fn insert(&self, sym: Symbol, val: Rc<Value>) {
match self {
MainEnvironment(EnvironmentVal { curr_ns, .. }) => {
curr_ns.insert(sym, val);
}
LocalEnvironment(_, mappings) => {
mappings.borrow_mut().insert(sym, val);
}
}
}
pub fn get(&self, sym: &Symbol) -> Rc<Value> {
match self {
MainEnvironment(EnvironmentVal { curr_ns, .. }) => curr_ns.get(sym),
LocalEnvironment(parent_env, mappings) => match mappings.borrow().get(sym) {
Some(val) => Rc::clone(val),
None => parent_env.get(sym),
},
}
}
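    // Illustrative sketch (not from the original source; `some_value` stands in
    // for any `Rc<Value>`): lookups in a local environment check its own
    // bindings first and only then fall back through the parent chain, so
    // locals shadow the current namespace.
    //
    //     let main = Rc::new(Environment::new_main_environment());
    //     main.insert(Symbol::intern("x"), some_value);
    //     let local = Environment::new_local_environment(Rc::clone(&main));
    //     local.get(&Symbol::intern("x")); // not bound locally, found in `main`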
pub fn clojure_core_environment() -> Rc<Environment> {
// Register our macros / functions ahead of time
let add_fn = rust_core::AddFn {};
let str_fn = rust_core::StrFn {};
let do_fn = rust_core::DoFn {};
let nth_fn = rust_core::NthFn {};
let do_macro = rust_core::DoMacro {};
let concat_fn = rust_core::ConcatFn {};
let print_string_fn = rust_core::PrintStringFn {};
// Hardcoded fns
let lexical_eval_fn = Value::LexicalEvalFn {};
// Hardcoded macros
let let_macro = Value::LetMacro {};
let quote_macro = Value::QuoteMacro {};
let def_macro = Value::DefMacro {};
let fn_macro = Value::FnMacro {};
let defmacro_macro = Value::DefmacroMacro {};
let environment = Rc::new(Environment::new_main_environment());
let eval_fn = rust_core::EvalFn::new(Rc::clone(&environment));
environment.insert(Symbol::intern("+"), add_fn.to_rc_value());
environment.insert(Symbol::intern("let"), let_macro.to_rc_value());
environment.insert(Symbol::intern("str"), str_fn.to_rc_value());
environment.insert(Symbol::intern("quote"), quote_macro.to_rc_value());
environment.insert(Symbol::intern("def"), def_macro.to_rc_value());
environment.insert(Symbol::intern("fn"), fn_macro.to_rc_value());
environment.insert(Symbol::intern("defmacro"), defmacro_macro.to_rc_value());
environment.insert(Symbol::intern("eval"), eval_fn.to_rc_value());
environment.insert(Symbol::intern("+"), add_fn.to_rc_value());
environment.insert(Symbol::intern("let"), let_macro.to_rc_value());
environment.insert(Symbol::intern("str"), str_fn.to_rc_value());
environment.insert(Symbol::intern("quote"), quote_macro.to_rc_value());
environment.insert(Symbol::intern("do-fn*"), do_fn.to_rc_value());
environment.insert(Symbol::intern("do"), do_macro.to_rc_value());
environment.insert(Symbol::intern("def"), def_macro.to_rc_value());
environment.insert(Symbol::intern("fn"), fn_macro.to_rc_value());
environment.insert(Symbol::intern("defmacro"), defmacro_macro.to_rc_value());
environment.insert(Symbol::intern("eval"), eval_fn.to_rc_value());
environment.insert(
Symbol::intern("lexical-eval"),
lexical_eval_fn.to_rc_value(),
);
environment.insert(Symbol::intern("nth"), nth_fn.to_rc_value());
environment.insert(Symbol::intern("concat"), concat_fn.to_rc_value());
environment.insert(
Symbol::intern("print-string"),
print_string_fn.to_rc_value(),
);
//
// Read in clojure.core
//
let _ = repl::try_eval_file(&environment, "./src/clojure/core.clj");
environment
}
}
| true
|
a656577629edab32e0a3a7f3abb2db15b8a33429
|
Rust
|
ubnt-intrepid/ring-rt
|
/src/io_driver.rs
|
UTF-8
| 2,352
| 2.6875
| 3
|
[
"Unlicense"
] |
permissive
|
use crate::{
event::{Event, RawEvent},
semaphore::{Permit, Semaphore},
};
use futures::channel::oneshot;
use iou::IoUring;
use std::{cell::RefCell, io, rc::Rc};
const SQ_CAPACITY: u32 = 16;
struct UserData {
permit: Permit,
tx: oneshot::Sender<*mut ()>,
event: RawEvent,
}
struct Inner {
ring: RefCell<IoUring>,
sq_capacity: Semaphore,
}
impl Inner {
async fn submit_event(&self, mut event: RawEvent) -> oneshot::Receiver<*mut ()> {
let permit = self.sq_capacity.acquire().await;
let mut ring = self.ring.borrow_mut();
let mut sqe = ring.next_sqe().expect("SQ is full");
unsafe {
event.prepare(&mut sqe);
}
let (tx, rx) = oneshot::channel();
let user_data = Box::new(UserData { permit, tx, event });
sqe.set_user_data(Box::into_raw(user_data) as _);
rx
}
}
pub struct Driver {
inner: Rc<Inner>,
}
impl Driver {
pub fn new() -> io::Result<Self> {
let ring = IoUring::new(SQ_CAPACITY)?;
Ok(Self {
inner: Rc::new(Inner {
ring: RefCell::new(ring),
sq_capacity: Semaphore::new(SQ_CAPACITY as usize),
}),
})
}
pub fn handle(&self) -> Handle {
Handle {
inner: self.inner.clone(),
}
}
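    // Completion side of the round-trip started in `Inner::submit_event`: each
    // SQE's user_data is a leaked `Box<UserData>`, reclaimed here so the event
    // can be completed, its output sent through the oneshot channel, and the
    // SQ-capacity permit released.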
pub(crate) fn submit_and_wait(&mut self) -> io::Result<()> {
let mut ring = self.inner.ring.borrow_mut();
ring.sq().submit_and_wait(1)?;
while let Some(mut cqe) = ring.cq().peek_for_cqe() {
unsafe {
let UserData {
mut event,
tx,
permit,
} = *Box::from_raw(cqe.user_data() as *mut UserData);
let output = event.complete(&mut cqe);
let _ = tx.send(output);
drop(permit);
}
}
Ok(())
}
}
#[derive(Clone)]
pub struct Handle {
inner: Rc<Inner>,
}
impl Handle {
pub(crate) async fn submit<E: Event>(&self, event: E) -> E::Output
where
E: Event,
{
let rx = self.inner.submit_event(RawEvent::new(event)).await;
let output = rx.await.unwrap();
unsafe {
let output = Box::from_raw(output as *mut E::Output);
*output
}
}
}
| true
|
34ec3ae550ae6834bea04713eab16a7006779708
|
Rust
|
fulara/knw_share
|
/multi/src/x05_showcase_rc.rs
|
UTF-8
| 1,006
| 3.03125
| 3
|
[] |
no_license
|
use std::thread;
use std::sync::Arc;
use std::rc::Rc;
use std::thread::sleep_ms;
use std::sync::Mutex;
#[test]
fn rc_showcase() {
// let m = Rc::new(Mutex::new(Vec::new()));
//
// for i in 0 .. 10 {
// let m = m.clone();
// thread::spawn(move || {
// let mut data = m.lock().unwrap();
// println!("thread {} woke up and doing extensive work.", i);
// sleep_ms(500);
// data.push(i);
// });
// }
//
// loop {
// {
// let data : &Vec<i32> = &m.lock().unwrap();
// println!("main thread is peeking at data! {:?}", data);
// }
//
// sleep_ms(1000);
// }
}
#[test]
fn channels_cheatsheet() {
// let (tx, rx) = channel();
//
// let handle = thread::spawn(move|| {
// loop {
// tx.send("hai hai");
//
// sleep_ms(1000);
// }
// });
//
// for d in rx {
// println!("hai from main thread...!");
// }
}
| true
|
0d8ade6c64df66d672dba9311b6a53db4b1c52f9
|
Rust
|
koboriakira/exercism-rust
|
/rust/rna-transcription/src/lib.rs
|
UTF-8
| 1,202
| 3.1875
| 3
|
[] |
no_license
|
#[derive(Debug, PartialEq)]
pub struct Dna {
nucleotides: Vec<char>,
}
#[derive(Debug, PartialEq)]
pub struct Rna {
nucleotides: Vec<char>,
}
impl Dna {
pub fn new(dna: &str) -> Result<Dna, usize> {
validate(dna, &['A', 'C', 'G', 'T']).and_then(|dna| {
Ok(Dna {
nucleotides: dna.chars().collect(),
})
})
}
pub fn into_rna(self) -> Rna {
let rna: String = self
.nucleotides
.iter()
.map(|n| match n {
'G' => 'C',
'C' => 'G',
'T' => 'A',
'A' => 'U',
_ => panic!()
})
.collect();
Rna::new(&rna).ok().unwrap()
}
}
impl Rna {
pub fn new(rna: &str) -> Result<Rna, usize> {
validate(rna, &['A', 'C', 'G', 'U']).and_then(|rna| {
Ok(Rna {
nucleotides: rna.chars().collect(),
})
})
}
}
fn validate<'a>(dna: &'a str, nucleotides: &[char]) -> Result<&'a str, usize> {
if let Some(position) = dna.chars().position(|c| !nucleotides.contains(&c)) {
Err(position)
} else {
Ok(dna)
}
}
| true
|
f1462d350f66540f26f29d4bbc074211467036d7
|
Rust
|
mantono/giss
|
/src/ui.rs
|
UTF-8
| 4,457
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
use std::{io::Write, sync::mpsc::RecvTimeoutError};
use std::{sync::mpsc::Receiver, time::Duration};
use itertools::Itertools;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
use user::Username;
use crate::{
cfg::Config,
issue::{Issue, Label, UserFields},
search::Type,
sort::Sorting,
user, AppErr,
};
pub struct DisplayConfig {
colors: ColorChoice,
sorting: Sorting,
user: Option<Username>,
limit: u32,
links: bool,
}
impl From<&Config> for DisplayConfig {
fn from(cfg: &Config) -> Self {
DisplayConfig {
colors: cfg.colors(),
limit: cfg.limit(),
user: cfg.username(),
sorting: cfg.sorting(),
links: cfg.show_links(),
}
}
}
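/// Collects issues from the channel (waiting up to 20 seconds per item) into a
/// buffer of up to three times the display limit, then sorts them, drops
/// duplicate ids, and prints at most `limit` of them.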
pub fn display(channel: Receiver<Issue>, cfg: DisplayConfig) -> Result<(), AppErr> {
let mut limit: u32 = cfg.limit * 3;
let mut queue: Vec<Issue> = Vec::with_capacity(limit as usize);
while limit > 0 {
match channel.recv_timeout(Duration::from_secs(20)) {
Ok(issue) => {
queue.push(issue);
limit -= 1;
}
Err(e) => match e {
RecvTimeoutError::Timeout => return Err(AppErr::Timeout),
RecvTimeoutError::Disconnected => limit = 0,
},
};
}
queue.sort_unstable_by(|i0, i1| cfg.sorting.sort(i0, i1));
queue
.into_iter()
.unique_by(|i| i.id)
.take(cfg.limit as usize)
.for_each(|i| print_issue(i, true, &cfg));
Ok(())
}
fn print_issue(issue: Issue, print_repo: bool, cfg: &DisplayConfig) {
let use_colors: ColorChoice = cfg.colors;
let title: String = truncate(issue.title.clone(), 50);
let assignees: String = issue
.assignees
.nodes
.iter()
.map(|a: &UserFields| &a.login)
.map(|s: &String| format!("{}{}", "@", s))
.collect::<Vec<String>>()
.join(", ");
let repo: String = if print_repo {
issue.repository.name_with_owner.clone()
} else {
String::from("")
};
let labels: String = issue
.labels
.nodes
.iter()
.map(|l: &Label| &l.name)
.map(|s: &String| format!("{}{}", "#", s))
.collect::<Vec<String>>()
.join(", ");
let mut stdout = StandardStream::stdout(use_colors);
print_type(&mut stdout, &issue, cfg);
let target: String = if print_repo {
format!("#{} {}", issue.number, repo)
} else {
format!("#{}", issue.number)
};
write(&mut stdout, target.as_str(), None);
delimiter(&mut stdout);
write(&mut stdout, &title, None);
if !assignees.is_empty() {
delimiter(&mut stdout);
write(&mut stdout, &assignees, Some(Color::Cyan));
}
if !labels.is_empty() {
delimiter(&mut stdout);
write(&mut stdout, &labels, Some(Color::Magenta));
}
if cfg.links {
delimiter(&mut stdout);
write(&mut stdout, &issue.link(), Some(Color::Blue));
}
write(&mut stdout, "\n", None);
}
fn print_type(stream: &mut StandardStream, issue: &Issue, cfg: &DisplayConfig) {
let kind: Type = match issue.kind {
Type::Issue => Type::Issue,
_ => match &cfg.user {
Some(user) => match issue.has_review_request(&user.0) {
true => Type::ReviewRequest,
false => Type::PullRequest,
},
None => Type::PullRequest,
},
};
match kind {
crate::search::Type::Issue => write(stream, "I ", Some(Color::Blue)),
crate::search::Type::PullRequest => write(stream, "P ", Some(Color::Magenta)),
crate::search::Type::ReviewRequest => {
write(stream, "P", Some(Color::Magenta));
write(stream, "R", Some(Color::Yellow));
}
};
write(stream, "| ", Some(Color::Green));
}
fn delimiter(stream: &mut StandardStream) {
write(stream, " | ", Some(Color::Green));
}
fn truncate(string: String, max_length: usize) -> String {
let new_length: usize = std::cmp::min(string.len(), max_length);
if new_length < string.len() {
string[..new_length].to_string()
} else {
string
}
}
fn write(stream: &mut StandardStream, content: &str, color: Option<Color>) {
stream.set_color(ColorSpec::new().set_fg(color)).unwrap();
write!(stream, "{}", content).unwrap();
}
| true
|
a3088ceb7cc924069fa38c5df57569cab9017898
|
Rust
|
gadomski/cpd-rs
|
/src/runner.rs
|
UTF-8
| 8,339
| 3.25
| 3
|
[] |
no_license
|
//! Run cpd algorithms.
use {Matrix, Normalize, Registration, Rigid, UInt};
use failure::Error;
use gauss_transform::Transformer;
use generic_array::ArrayLength;
use nalgebra::DimName;
use std::f64;
use std::ops::Mul;
const DEFAULT_ERROR_CHANGE_THRESHOLD: f64 = 1e-5;
const DEFAULT_MAX_ITERATIONS: usize = 150;
const DEFAULT_OUTLIER_WEIGHT: f64 = 0.1;
const DEFAULT_SIGMA2_THRESHOLD: f64 = f64::EPSILON * 10.;
/// Generic interface for running cpd registration methods.
///
/// Use the builder pattern to configure how cpd registrations are run.
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new().max_iterations(100); // etc
/// ```
///
/// Use methods like `rigid()` to specify the type of registration, and convert the builder into a
/// method-specific builder:
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new().rigid();
/// ```
#[derive(Clone, Copy, Debug)]
pub struct Runner {
error_change_threshold: f64,
max_iterations: usize,
normalize: Normalize,
outlier_weight: f64,
sigma2: Option<f64>,
sigma2_threshold: f64,
}
/// The result of a cpd run.
#[derive(Debug)]
pub struct Run<D, T>
where
D: DimName,
{
/// Did this run converge?
pub converged: bool,
/// The number of iterations.
pub iterations: usize,
/// The moved points.
pub moved: Matrix<D>,
/// The transform returned by the registration method.
pub transform: T,
}
impl Runner {
/// Creates a new, default runner.
///
/// # Examples
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new();
/// ```
pub fn new() -> Runner {
Runner::default()
}
/// Sets the error change threshold.
///
/// Make this lower if you want to get more precise.
///
/// # Examples
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new().error_change_threshold(1e-8);
/// ```
pub fn error_change_threshold(mut self, error_change_threshold: f64) -> Runner {
self.error_change_threshold = error_change_threshold;
self
}
/// Sets the maximum number of iterations when running cpd.
///
/// # Examples
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new().max_iterations(100);
/// ```
pub fn max_iterations(mut self, max_iterations: usize) -> Runner {
self.max_iterations = max_iterations;
self
}
/// Sets the normalization strategy.
///
/// # Examples
///
/// ```
/// use cpd::{Normalize, Runner};
/// let runner = Runner::new().normalize(Normalize::Independent);
/// ```
pub fn normalize(mut self, normalize: Normalize) -> Runner {
self.normalize = normalize;
self
}
/// Sets the outlier weight.
///
/// Does *not* check to see whether it is a valid value, yet.
///
/// # Examples
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new().outlier_weight(0.2);
/// ```
pub fn outlier_weight(mut self, outlier_weight: f64) -> Runner {
self.outlier_weight = outlier_weight;
self
}
/// Sets the initial sigma2.
///
/// If none, use the default sigma2 as calculated from the matrices.
///
/// # Examples
///
/// ```
/// use cpd::Runner;
/// let runner = Runner::new().sigma2(1.1).sigma2(None);
/// ```
pub fn sigma2<T: Into<Option<f64>>>(mut self, sigma2: T) -> Runner {
self.sigma2 = sigma2.into();
self
}
/// Returns true if this runner requires scaling, usually because of the normalization.
///
/// # Examples
///
/// ```
/// use cpd::{Normalize, Runner};
/// assert!(Runner::new().normalize(Normalize::Independent).requires_scaling());
/// assert!(!Runner::new().normalize(Normalize::SameScale).requires_scaling());
/// ```
pub fn requires_scaling(&self) -> bool {
self.normalize.requires_scaling()
}
/// Returns a rigid registration builder that will use this runner.
///
/// # Examples
///
/// ```
/// use cpd::Runner;
/// let rigid = Runner::new().rigid();
/// ```
pub fn rigid(self) -> Rigid {
self.into()
}
/// Runs a `Registration`.
///
/// # Examples
///
/// ```
/// use cpd::{Runner, Rigid, utils, U2};
///
/// let runner = Runner::new();
/// let rigid = Rigid::new();
/// let registration = rigid.as_registration::<U2>().unwrap();
/// let matrix = utils::random_matrix2(10);
/// let run = runner.run(&matrix, &matrix, registration).unwrap();
/// ```
pub fn run<D, R>(
&self,
fixed: &Matrix<D>,
moving: &Matrix<D>,
mut registration: R,
) -> Result<Run<D, R::Transform>, Error>
where
R: Registration<D> + Into<<R as Registration<D>>::Transform>,
D: DimName,
UInt: Mul<<D as DimName>::Value>,
<D as DimName>::Value: Mul + Mul<UInt>,
<UInt as Mul<<D as DimName>::Value>>::Output: ArrayLength<f64>,
<<D as DimName>::Value as Mul>::Output: ArrayLength<f64>,
<<D as DimName>::Value as Mul<UInt>>::Output: ArrayLength<f64>,
{
let (fixed, mut moving, normalization) = self.normalize.normalize(fixed, moving);
let mut error = 0.;
let mut error_change = f64::MAX;
let mut iterations = 0;
let mut sigma2 = self.sigma2.unwrap_or(sigma2(&fixed, &moving));
let mut moved = moving.as_ref().clone();
let transformer = Transformer::new(&fixed, self.outlier_weight)?;
while iterations < self.max_iterations && self.error_change_threshold < error_change
&& self.sigma2_threshold < sigma2
{
let probabilities = transformer.probabilities(&moved, sigma2);
error_change = ((probabilities.error - error) / probabilities.error).abs();
info!(
"iterations={}, error_change={}, sigma2={}",
iterations, error_change, sigma2
);
error = probabilities.error;
sigma2 = registration.iterate(&fixed, &moving, &probabilities);
moved = registration.transform(&moving);
iterations += 1;
}
if let Some(normalization) = normalization {
registration.denormalize(&normalization);
normalization.moving.denormalize(moving.to_mut());
}
moved = registration.transform(&moving);
Ok(Run {
converged: iterations < self.max_iterations,
iterations: iterations,
moved: moved,
transform: registration.into(),
})
}
}
impl Default for Runner {
fn default() -> Runner {
Runner {
error_change_threshold: DEFAULT_ERROR_CHANGE_THRESHOLD,
max_iterations: DEFAULT_MAX_ITERATIONS,
normalize: Normalize::default(),
outlier_weight: DEFAULT_OUTLIER_WEIGHT,
sigma2: None,
sigma2_threshold: DEFAULT_SIGMA2_THRESHOLD,
}
}
}
/// The default sigma2 for two matrices.
///
/// # Examples
///
/// ```
/// use cpd::{runner, utils};
/// let matrix = utils::random_matrix2(10);
/// let sigma2 = runner::sigma2(&matrix, &matrix);
/// ```
pub fn sigma2<D>(fixed: &Matrix<D>, moving: &Matrix<D>) -> f64
where
D: DimName,
UInt: Mul<<D as DimName>::Value>,
<D as DimName>::Value: Mul + Mul<UInt>,
<UInt as Mul<<D as DimName>::Value>>::Output: ArrayLength<f64>,
<<D as DimName>::Value as Mul>::Output: ArrayLength<f64>,
<<D as DimName>::Value as Mul<UInt>>::Output: ArrayLength<f64>,
{
use RowVector;
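    // `sum` collapses a matrix to its per-dimension column sums; the numerator
    // below is N_fixed * trace(fixed' * fixed) + N_moving * trace(moving' * moving)
    // - 2 * sum(fixed) . sum(moving), and the denominator averages it over every
    // point pair and dimension.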
let sum = |matrix: &Matrix<D>| {
RowVector::<D>::from_iterator((0..D::dim()).map(|d| matrix.column(d).iter().sum::<f64>()))
};
let numerator = fixed.nrows() as f64 * (fixed.transpose() * fixed).trace()
+ moving.nrows() as f64 * (moving.transpose() * moving).trace()
- 2. * (sum(fixed) * sum(moving).transpose())[0];
    let denominator = (fixed.nrows() * moving.nrows() * D::dim()) as f64;
    numerator / denominator
}
#[cfg(test)]
mod tests {
use utils;
#[test]
fn sigma2() {
let matrix = utils::matrix2_from_slice(&[1., 2., 3., 4.]);
assert_relative_eq!(0.5, super::sigma2(&matrix, &matrix));
}
}
| true
|
f213862161f7076e44a067fff6cb195f915bdeff
|
Rust
|
nanreh/strava-rs
|
/strava_rs/src/models/lap.rs
|
UTF-8
| 4,291
| 2.640625
| 3
|
[] |
no_license
|
/*
* Strava API v3
*
* The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs.
*
* The version of the OpenAPI document: 3.0.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Lap {
/// The unique identifier of this lap
#[serde(rename = "id", skip_serializing_if = "Option::is_none")]
pub id: Option<i64>,
#[serde(rename = "activity", skip_serializing_if = "Option::is_none")]
pub activity: Option<crate::models::MetaActivity>,
#[serde(rename = "athlete", skip_serializing_if = "Option::is_none")]
pub athlete: Option<crate::models::MetaAthlete>,
/// The lap's average cadence
#[serde(rename = "average_cadence", skip_serializing_if = "Option::is_none")]
pub average_cadence: Option<f32>,
/// The lap's average speed
#[serde(rename = "average_speed", skip_serializing_if = "Option::is_none")]
pub average_speed: Option<f32>,
/// The lap's distance, in meters
#[serde(rename = "distance", skip_serializing_if = "Option::is_none")]
pub distance: Option<f32>,
/// The lap's elapsed time, in seconds
#[serde(rename = "elapsed_time", skip_serializing_if = "Option::is_none")]
pub elapsed_time: Option<i32>,
/// The start index of this effort in its activity's stream
#[serde(rename = "start_index", skip_serializing_if = "Option::is_none")]
pub start_index: Option<i32>,
/// The end index of this effort in its activity's stream
#[serde(rename = "end_index", skip_serializing_if = "Option::is_none")]
pub end_index: Option<i32>,
/// The index of this lap in the activity it belongs to
#[serde(rename = "lap_index", skip_serializing_if = "Option::is_none")]
pub lap_index: Option<i32>,
    /// The maximum speed of this lap, in meters per second
#[serde(rename = "max_speed", skip_serializing_if = "Option::is_none")]
pub max_speed: Option<f32>,
/// The lap's moving time, in seconds
#[serde(rename = "moving_time", skip_serializing_if = "Option::is_none")]
pub moving_time: Option<i32>,
/// The name of the lap
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// The athlete's pace zone during this lap
#[serde(rename = "pace_zone", skip_serializing_if = "Option::is_none")]
pub pace_zone: Option<i32>,
#[serde(rename = "split", skip_serializing_if = "Option::is_none")]
pub split: Option<i32>,
/// The time at which the lap was started.
#[serde(rename = "start_date", skip_serializing_if = "Option::is_none")]
pub start_date: Option<String>,
/// The time at which the lap was started in the local timezone.
#[serde(rename = "start_date_local", skip_serializing_if = "Option::is_none")]
pub start_date_local: Option<String>,
/// The elevation gain of this lap, in meters
#[serde(rename = "total_elevation_gain", skip_serializing_if = "Option::is_none")]
pub total_elevation_gain: Option<f32>,
}
impl Lap {
pub fn new() -> Lap {
Lap {
id: None,
activity: None,
athlete: None,
average_cadence: None,
average_speed: None,
distance: None,
elapsed_time: None,
start_index: None,
end_index: None,
lap_index: None,
max_speed: None,
moving_time: None,
name: None,
pace_zone: None,
split: None,
start_date: None,
start_date_local: None,
total_elevation_gain: None,
}
}
}
| true
|
96c06019b337d32e64732da77b21225360a444b6
|
Rust
|
kuenishi/qlitch
|
/src/lib.rs
|
UTF-8
| 1,637
| 3.109375
| 3
|
[] |
no_license
|
use std::io::prelude::*;
use std::fs::File;
pub fn glitch(filename : &str, pattern : &str) {
println!("Glitching {} with pattern '{}'", filename, pattern);
println!("generating a file as a debug...");
let mut f = File::create(filename).unwrap();
f.write_all(b"hello, world\n").unwrap();
f.write(&[1; 1]).unwrap();
f.write_all(b"damn, hello, world").unwrap();
}
pub fn find(filename : &str) {
println!("Find non-printables in {}", filename);
let mut f = File::open(filename).unwrap();
//let mut buf : Vec<u8> = Vec::with_capacity(65546);
let mut buf = [0; 65536];
let mut pos = 0;
loop {
let r = f.read(&mut buf);
match r {
Ok(s) if s == 0 => {
return
}
Ok(s) => {
// println!("Read {} bytes", s);
//println!(">{:?}", &buf[1..s]);
let line = String::from_utf8_lossy(&buf[0..s]);
println!("{}", line);
for (i, c) in line.chars().enumerate() {
//print!("{}>{},", c, c as u32);
if (c as u32) < 7 {
print!("offset:{} invalid:", pos+i); //, c as u32)
for d in c.escape_default() {
print!("{}", d)
}
println!("")
}
}
pos += s;
}
Err(e) => {
println!("Error: {}", e);
return
}
}
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
}
}
| true
|
d4b3bbca56577dfe7270c6455ecb10567477badd
|
Rust
|
95th/defaults
|
/tests/17-enums-struct-variant-incomplete.rs
|
UTF-8
| 149
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
use defaults::Defaults;
#[derive(Defaults)]
#[def = "A { a: 10 }"]
enum Foo {
A { a: u8, b: usize },
B { x: u8, y: usize },
}
fn main() {}
| true
|
c4c154779bb4ce95a9f7488357c619343d5b7ac0
|
Rust
|
brl/pH
|
/src/memory/ram.rs
|
UTF-8
| 4,681
| 2.90625
| 3
|
[] |
no_license
|
use std::sync::Arc;
use std::mem;
use crate::memory::{Mapping,AddressRange};
use crate::memory::mmap::Serializable;
use crate::system::{Result, Error};
use crate::util::ByteBuffer;
#[derive(Clone)]
pub struct GuestRam {
ram_size: usize,
regions: Arc<Vec<MemoryRegion>>,
}
impl GuestRam {
pub fn new(ram_size: usize) -> GuestRam {
GuestRam {
ram_size,
regions: Vec::new().into(),
}
}
pub fn ram_size(&self) -> usize {
self.ram_size
}
pub fn region_count(&self) -> usize {
self.regions.len()
}
pub fn write_bytes(&self, guest_address: u64, bytes: &[u8]) -> Result<()> {
let region = self.find_region(guest_address, bytes.len())?;
region.write_bytes(guest_address, bytes)
}
pub fn read_bytes(&self, guest_address: u64, bytes: &mut [u8]) -> Result<()> {
let region = self.find_region(guest_address, bytes.len())?;
region.read_bytes(guest_address, bytes)
}
pub fn slice(&self, guest_address: u64, size: usize) -> Result<&[u8]> {
let region = self.find_region(guest_address, size)?;
region.slice(guest_address, size)
}
pub fn mut_buffer(&self, guest_address: u64, size: usize) -> Result<ByteBuffer<&mut [u8]>> {
let bytes = self.mut_slice(guest_address, size)?;
Ok(ByteBuffer::from_bytes_mut(bytes))
}
pub fn mut_slice(&self, guest_address: u64, size: usize) -> Result<&mut[u8]> {
let region = self.find_region(guest_address, size)?;
region.mut_slice(guest_address, size)
}
pub fn write_int<T: Serializable>(&self, guest_address: u64, val: T) -> Result<()> {
let region = self.find_region(guest_address, mem::size_of::<T>())?;
region.write_int(guest_address, val)
}
pub fn read_int<T: Serializable>(&self, guest_address: u64) -> Result<T> {
let region = self.find_region(guest_address, mem::size_of::<T>())?;
region.read_int(guest_address)
}
pub fn set_regions(&mut self, regions: Vec<MemoryRegion>) {
self.regions = regions.into();
}
#[allow(dead_code)]
pub fn end_addr(&self) -> u64 {
self.regions.iter()
.max_by_key(|r| r.guest_range.end())
.map_or(0, |r| r.guest_range.end())
}
pub fn is_valid_range(&self, guest_address: u64, size: usize) -> bool {
self.find_region(guest_address, size).is_ok()
}
fn find_region(&self, guest_address: u64, size: usize) -> Result<&MemoryRegion> {
self.regions.iter()
.find(|r| r.contains(guest_address, size))
.ok_or(Error::InvalidAddress(guest_address))
}
}
pub struct MemoryRegion {
guest_range: AddressRange,
mapping: Mapping,
}
impl MemoryRegion {
pub fn new(guest_base: u64, size: usize) -> Result<MemoryRegion> {
Ok(MemoryRegion{
guest_range: AddressRange::new(guest_base, size),
mapping: Mapping::new(size)?,
})
}
pub fn base_address(&self) -> u64 {
self.mapping.address()
}
fn contains(&self, guest_addr: u64, size: usize) -> bool { self.guest_range.contains(guest_addr, size) }
fn checked_offset(&self, guest_addr: u64, size: usize) -> Result<usize> {
if self.contains(guest_addr, size) {
Ok(self.guest_range.offset_of(guest_addr))
} else {
Err(Error::InvalidAddress(guest_addr))
}
}
pub fn write_bytes(&self, guest_address: u64, bytes: &[u8]) -> Result<()> {
let offset = self.checked_offset(guest_address, bytes.len())?;
self.mapping.write_bytes(offset, bytes)
}
pub fn read_bytes(&self, guest_address: u64, bytes: &mut [u8]) -> Result<()> {
let offset = self.checked_offset(guest_address, bytes.len())?;
self.mapping.read_bytes(offset, bytes)
}
pub fn slice(&self, guest_address: u64, size: usize) -> Result<&[u8]> {
let offset = self.checked_offset(guest_address, size)?;
self.mapping.slice(offset, size)
}
pub fn mut_slice(&self, guest_address: u64, size: usize) -> Result<&mut [u8]> {
let offset = self.checked_offset(guest_address, size)?;
self.mapping.mut_slice(offset, size)
}
pub fn write_int<T: Serializable>(&self, guest_address: u64, val: T) -> Result<()> {
let offset = self.checked_offset(guest_address, mem::size_of::<T>())?;
self.mapping.write_int(offset, val)
}
pub fn read_int<T: Serializable>(&self, guest_address: u64) -> Result<T> {
let offset = self.checked_offset(guest_address, mem::size_of::<T>())?;
self.mapping.read_int(offset)
}
}
| true
|
269ea85d83ca0ce37ad370b017b8ec4df6813299
|
Rust
|
CirrusNeptune/libftd2xx-rs
|
/src/util.rs
|
UTF-8
| 1,822
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
pub trait NumberBoilerplate {
fn zero() -> Self;
}
impl NumberBoilerplate for i8 {
fn zero() -> i8 {
0i8
}
}
impl NumberBoilerplate for u8 {
fn zero() -> u8 {
0u8
}
}
// Converts an i8 or u8 slice into a string. Non UTF-8 will be lost.
//
// The FTDI strings have unique requirements:
// * They may contain interior nul bytes.
// * They might not be nul terminated.
pub fn slice_into_string<T>(array: &[T]) -> String
where
T: NumberBoilerplate + std::cmp::PartialEq,
{
let mut idx: usize = array.len();
for (i, element) in array.iter().enumerate() {
if *element == NumberBoilerplate::zero() {
idx = i;
break;
}
}
// Safety: The trait bounds for T are only implemented for u8 and i8, which
// are equal size, and are therefore safe to transmute.
debug_assert_eq!(std::mem::size_of::<T>(), std::mem::size_of::<u8>());
String::from_utf8_lossy(unsafe { &*(&array[0..idx] as *const [T] as *const [u8]) }).to_string()
}
#[cfg(test)]
mod slice_into_string {
use super::*;
#[test]
fn empty() {
let data: [i8; 0] = [];
assert_eq!(slice_into_string(&data), String::from(""));
}
#[test]
fn positive_path() {
let data: [u8; 2] = [0x61, 0x00];
assert_eq!(slice_into_string(&data), String::from("a"));
}
#[test]
fn interior_nul() {
let data: [i8; 3] = [0x61, 0x00, 0x61];
assert_eq!(slice_into_string(&data), String::from("a"));
}
#[test]
fn no_nul() {
let data: [i8; 3] = [0x61; 3];
assert_eq!(slice_into_string(&data), String::from("aaa"));
}
#[test]
fn non_utf8() {
let data: [i8; 2] = [0xFEu8 as i8, 0x00];
assert_eq!(slice_into_string(&data), String::from("�"));
}
}
| true
|
868f0916b7a30c315b08771b6544bb7c433f89d2
|
Rust
|
tomohiko38/rust_projects
|
/old/conv_fahr_celsius/src/main.rs
|
UTF-8
| 336
| 3.578125
| 4
|
[] |
no_license
|
fn main() {
let mut celsius = 0.0;
let step = 10.0;
let max_celsius = 100.0;
while celsius < max_celsius {
let fahr = conv_c_to_f(celsius);
println!("celsius:{} fahr:{}", celsius, fahr);
celsius = celsius + step;
}
}
fn conv_c_to_f(celsius: f32) -> f32 {
celsius / (5.0 / 9.0) + 32.0
}
| true
|
1eae7ce8b4231be68ac05ff0db88b72a8174009f
|
Rust
|
rschifflin/1gam_feb2017
|
/src/fsm/hero/jump/single_jump.rs
|
UTF-8
| 2,659
| 2.921875
| 3
|
[] |
no_license
|
use input::{self, Input};
use super::Jump;
use super::constants::*;
#[derive(Debug, PartialEq)]
enum SingleJumpState {
Standing,
PreJump,
Jumping,
Falling
}
#[derive(Debug, PartialEq)]
pub struct SingleJumpFSM {
jump_state: (usize, SingleJumpState),
yvel: f64
}
impl SingleJumpFSM {
pub fn new() -> SingleJumpFSM {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Standing),
yvel: 0.0
}
}
fn _update(&self, yvel: f64, (last_input, next_input): (Input, Input)) -> SingleJumpFSM {
match self.jump_state.1 {
SingleJumpState::Standing => {
if yvel != 0.0 {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Falling),
yvel: yvel
}
} else if next_input.contains(input::JUMP) && !last_input.contains(input::JUMP) {
SingleJumpFSM {
jump_state: (0, SingleJumpState::PreJump),
yvel: PREJUMP_YVEL
}
} else {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Standing),
yvel: yvel
}
}
},
SingleJumpState::PreJump => {
let frame_counter = self.jump_state.0 + 1;
if yvel > 0.0 {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Falling),
yvel: yvel
}
} else {
let new_yvel = yvel + PREJUMP_ACCEL * PREJUMP_ACCEL_DECAY.powi(frame_counter as i32);
if frame_counter > PREJUMP_FRAMES || !next_input.contains(input::JUMP) {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Jumping),
yvel: new_yvel
}
} else {
SingleJumpFSM {
jump_state: (frame_counter, SingleJumpState::PreJump),
yvel: new_yvel
}
}
}
},
SingleJumpState::Jumping => {
if yvel > 0.0 {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Falling),
yvel: yvel
}
} else {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Jumping),
yvel: yvel
}
}
},
SingleJumpState::Falling => {
SingleJumpFSM {
jump_state: (0, SingleJumpState::Falling),
yvel: yvel + FALLING_DECEL
}
}
}
}
}
impl Jump for SingleJumpFSM {
fn get_yvel(&self) -> f64 { self.yvel }
fn on_landed(&mut self) {
*self = SingleJumpFSM {
jump_state: (0, SingleJumpState::Standing),
yvel: 0.0
};
}
fn update(&mut self, yvel: f64, inputs: (Input, Input)) {
*self = self._update(yvel, inputs);
}
}
| true
|
dd0db8932eeab09a263acdb1613b013fb08c8e1e
|
Rust
|
safrannn/leetcode_rust
|
/src/_0678_valid_parenthesis_string.rs
|
UTF-8
| 796
| 3.390625
| 3
|
[] |
no_license
|
struct Solution;
impl Solution {
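    // Greedy bounds check: track the interval [h_low, h_high] of possible open
    // parenthesis counts, letting '*' act as ')' for the lower bound and as '('
    // for the upper bound. The string is valid if the upper bound never drops
    // below zero and the lower bound can end at zero.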
pub fn check_valid_string(s: String) -> bool {
let mut h_low: i32 = 0;
let mut h_high: i32 = 0;
for v in s.chars() {
if v == '(' {
h_low += 1;
} else {
h_low -= 1;
}
if v != ')' {
h_high += 1;
} else {
h_high -= 1;
}
if h_high < 0 {
return false;
}
h_low = h_low.max(0);
}
h_low == 0
}
}
#[test]
fn test() {
assert_eq!(Solution::check_valid_string("()".to_string()), true);
assert_eq!(Solution::check_valid_string("(*)".to_string()), true);
assert_eq!(Solution::check_valid_string("(*))".to_string()), true);
}
| true
|
c1bfe493d936ebc7b9c1050d5c5114f2b0851713
|
Rust
|
bytecodealliance/wasmtime
|
/build.rs
|
UTF-8
| 9,235
| 2.625
| 3
|
[
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
//! Build program to generate a program which runs all the testsuites.
//!
//! By generating a separate `#[test]` test for each file, we allow cargo test
//! to automatically run the files in parallel.
use anyhow::Context;
use std::env;
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
fn main() -> anyhow::Result<()> {
println!("cargo:rerun-if-changed=build.rs");
let out_dir = PathBuf::from(
env::var_os("OUT_DIR").expect("The OUT_DIR environment variable must be set"),
);
let mut out = String::new();
for strategy in &["Cranelift", "Winch"] {
writeln!(out, "#[cfg(test)]")?;
writeln!(out, "#[allow(non_snake_case)]")?;
if *strategy == "Winch" {
// We only test Winch on x86_64, for now.
writeln!(out, "{}", "#[cfg(all(target_arch = \"x86_64\"))]")?;
}
writeln!(out, "mod {} {{", strategy)?;
with_test_module(&mut out, "misc", |out| {
test_directory(out, "tests/misc_testsuite", strategy)?;
test_directory_module(out, "tests/misc_testsuite/multi-memory", strategy)?;
test_directory_module(out, "tests/misc_testsuite/simd", strategy)?;
test_directory_module(out, "tests/misc_testsuite/tail-call", strategy)?;
test_directory_module(out, "tests/misc_testsuite/threads", strategy)?;
test_directory_module(out, "tests/misc_testsuite/memory64", strategy)?;
test_directory_module(out, "tests/misc_testsuite/component-model", strategy)?;
test_directory_module(out, "tests/misc_testsuite/function-references", strategy)?;
// The testsuite of Winch is a subset of the official
// WebAssembly test suite, until parity is reached. This
// check is in place to prevent Cranelift from duplicating
// tests.
if *strategy == "Winch" {
test_directory_module(out, "tests/misc_testsuite/winch", strategy)?;
}
Ok(())
})?;
with_test_module(&mut out, "spec", |out| {
let spec_tests = test_directory(out, "tests/spec_testsuite", strategy)?;
// Skip running spec_testsuite tests if the submodule isn't checked
// out.
if spec_tests > 0 {
test_directory_module(out, "tests/spec_testsuite/proposals/memory64", strategy)?;
test_directory_module(
out,
"tests/spec_testsuite/proposals/function-references",
strategy,
)?;
test_directory_module(
out,
"tests/spec_testsuite/proposals/multi-memory",
strategy,
)?;
test_directory_module(out, "tests/spec_testsuite/proposals/threads", strategy)?;
test_directory_module(
out,
"tests/spec_testsuite/proposals/relaxed-simd",
strategy,
)?;
test_directory_module(out, "tests/spec_testsuite/proposals/tail-call", strategy)?;
} else {
println!(
"cargo:warning=The spec testsuite is disabled. To enable, run `git submodule \
update --remote`."
);
}
Ok(())
})?;
writeln!(out, "}}")?;
}
// Write out our auto-generated tests and opportunistically format them with
// `rustfmt` if it's installed.
let output = out_dir.join("wast_testsuite_tests.rs");
fs::write(&output, out)?;
drop(Command::new("rustfmt").arg(&output).status());
Ok(())
}
fn test_directory_module(
out: &mut String,
path: impl AsRef<Path>,
strategy: &str,
) -> anyhow::Result<usize> {
let path = path.as_ref();
let testsuite = &extract_name(path);
with_test_module(out, testsuite, |out| test_directory(out, path, strategy))
}
fn test_directory(
out: &mut String,
path: impl AsRef<Path>,
strategy: &str,
) -> anyhow::Result<usize> {
let path = path.as_ref();
let mut dir_entries: Vec<_> = path
.read_dir()
.context(format!("failed to read {:?}", path))?
.map(|r| r.expect("reading testsuite directory entry"))
.filter_map(|dir_entry| {
let p = dir_entry.path();
let ext = p.extension()?;
// Only look at wast files.
if ext != "wast" {
return None;
}
// Ignore files starting with `.`, which could be editor temporary files
if p.file_stem()?.to_str()?.starts_with('.') {
return None;
}
Some(p)
})
.collect();
dir_entries.sort();
let testsuite = &extract_name(path);
for entry in dir_entries.iter() {
write_testsuite_tests(out, entry, testsuite, strategy, false)?;
write_testsuite_tests(out, entry, testsuite, strategy, true)?;
}
Ok(dir_entries.len())
}
/// Extract a valid Rust identifier from the stem of a path.
fn extract_name(path: impl AsRef<Path>) -> String {
path.as_ref()
.file_stem()
.expect("filename should have a stem")
.to_str()
.expect("filename should be representable as a string")
.replace(['-', '/'], "_")
}
fn with_test_module<T>(
out: &mut String,
testsuite: &str,
f: impl FnOnce(&mut String) -> anyhow::Result<T>,
) -> anyhow::Result<T> {
out.push_str("mod ");
out.push_str(testsuite);
out.push_str(" {\n");
let result = f(out)?;
out.push_str("}\n");
Ok(result)
}
fn write_testsuite_tests(
out: &mut String,
path: impl AsRef<Path>,
testsuite: &str,
strategy: &str,
pooling: bool,
) -> anyhow::Result<()> {
let path = path.as_ref();
let testname = extract_name(path);
writeln!(out, "#[test]")?;
// Ignore when using QEMU for running tests (limited memory).
if ignore(testsuite, &testname, strategy) {
writeln!(out, "#[ignore]")?;
} else {
writeln!(out, "#[cfg_attr(miri, ignore)]")?;
}
writeln!(
out,
"fn r#{}{}() {{",
&testname,
if pooling { "_pooling" } else { "" }
)?;
writeln!(out, " let _ = env_logger::try_init();")?;
writeln!(
out,
" crate::wast::run_wast(r#\"{}\"#, crate::wast::Strategy::{}, {}).unwrap();",
path.display(),
strategy,
pooling,
)?;
writeln!(out, "}}")?;
writeln!(out)?;
Ok(())
}
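// For reference, each generated test roughly takes this shape (the path,
// strategy and pooling flag vary per invocation):
//
//     #[test]
//     #[cfg_attr(miri, ignore)]
//     fn r#some_test_pooling() {
//         let _ = env_logger::try_init();
//         crate::wast::run_wast(r#"tests/misc_testsuite/some_test.wast"#, crate::wast::Strategy::Cranelift, true).unwrap();
//     }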
/// Ignore tests that aren't supported yet.
fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool {
assert!(strategy == "Cranelift" || strategy == "Winch");
// Ignore everything except the winch misc test suite.
// We ignore tests that assert for traps on windows, given
// that Winch doesn't encode unwind information for Windows, yet.
if strategy == "Winch" {
if testsuite != "winch" {
return true;
}
let assert_trap = ["i32", "i64"].contains(&testname);
if assert_trap && env::var("CARGO_CFG_TARGET_OS").unwrap().as_str() == "windows" {
return true;
}
}
// This is an empty file right now which the `wast` crate doesn't parse
if testname.contains("memory_copy1") {
return true;
}
if testsuite == "function_references" {
// The following tests fail due to function references not yet
// being exposed in the public API.
if testname == "ref_null" || testname == "local_init" {
return true;
}
// This test fails due to incomplete support for the various
// table/elem syntactic sugar in wasm-tools/wast.
if testname == "br_table" {
return true;
}
// This test fails due to the current implementation of type
// canonicalisation being broken as a result of
// #[derive(hash)] on WasmHeapType.
if testname == "type_equivalence" {
return true;
}
}
match env::var("CARGO_CFG_TARGET_ARCH").unwrap().as_str() {
"s390x" => {
// FIXME: These tests fail under qemu due to a qemu bug.
testname == "simd_f32x4_pmin_pmax" || testname == "simd_f64x2_pmin_pmax"
// TODO(#6530): These tests require tail calls, but s390x
// doesn't support them yet.
|| testsuite == "function_references" || testsuite == "tail_call"
}
"riscv64" => {
if testsuite.contains("relaxed_simd") {
return true;
}
let known_failure = [
"canonicalize_nan",
"cvt_from_uint",
"issue_3327_bnot_lowering",
"simd_conversions",
"simd_f32x4_rounding",
"simd_f64x2_rounding",
"simd_i32x4_trunc_sat_f32x4",
"simd_i32x4_trunc_sat_f64x2",
"simd_load",
"simd_splat",
]
.contains(&testname);
known_failure
}
_ => false,
}
}
| true
|
154f39ae82db7466faa8013d185173f154031bc3
|
Rust
|
ekirshey/rusty_rogue
|
/src/world/tile.rs
|
UTF-8
| 1,842
| 3.375
| 3
|
[] |
no_license
|
use world::Direction;
use utils::Vec3;
// I need to rethink this tile structure
// I think I put too much into tiletype instead of tile
pub struct TileDisplay {
pub fg : Vec3<u8>,
pub bg : Vec3<u8>,
pub icon : char
}
#[derive(Debug)]
pub enum TileType {
Wall,
Granite,
Exit {
node_id : usize,
exiting_direction : Direction
},
}
impl TileType {
// Maybe move this out into some sort of "colorizer" object
// that takes in the tile type and the dungeon "theme" to
    // determine what the appropriate color should be
pub fn value(&self) -> TileDisplay {
match *self {
TileType::Wall => TileDisplay {
fg : Vec3::new(255, 255, 255),
bg : Vec3::new(95,95,95),
icon : '#'
},
TileType::Granite => TileDisplay {
fg : Vec3::new(255, 255, 255),
bg : Vec3::new(95,95,95),
icon : '.'
},
TileType::Exit{node_id, exiting_direction} => TileDisplay {
fg : Vec3::new(0, 0, 0),
bg : Vec3::new(255, 242, 0),
icon : ' '
},
}
}
pub fn collidable(&self) -> bool {
match *self {
TileType::Wall => true,
TileType::Granite => false,
TileType::Exit{node_id, exiting_direction} => false,
}
}
}
impl Copy for TileType { }
impl Clone for TileType {
fn clone(&self) -> TileType {
*self
}
}
pub struct Tile {
pub id : TileType,
pub occupied : bool,
pub uuid : usize,
pub corpses : Vec<usize>
}
impl Tile {
pub fn new(id : TileType) -> Tile {
Tile {
id,
occupied : false,
uuid : 0,
corpses : Vec::new()
}
}
}
| true
|
3ce8988e655d0bde39ed19f1d12cdaccd02f391d
|
Rust
|
ron-rs/ron
|
/tests/393_serde_errors.rs
|
UTF-8
| 6,288
| 2.765625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
use ron::error::{Error, Position, SpannedError};
#[derive(Debug, serde::Deserialize, PartialEq)]
#[serde(deny_unknown_fields)]
enum TestEnum {
StructVariant { a: bool, b: char, c: i32 },
NewtypeVariant(TestStruct),
}
#[derive(Debug, serde::Deserialize, PartialEq)]
#[serde(tag = "type")]
enum TestEnumInternal {
StructVariant { a: bool },
}
#[derive(Debug, serde::Deserialize, PartialEq)]
#[serde(tag = "type", content = "content")]
enum TestEnumAdjacent {
StructVariant { a: bool },
}
#[derive(Debug, serde::Deserialize, PartialEq)]
#[serde(untagged)]
enum TestEnumUntagged {
StructVariant { a: bool },
}
#[derive(Debug, serde::Deserialize, PartialEq)]
#[serde(deny_unknown_fields)]
struct TestStruct {
a: bool,
b: char,
c: i32,
}
#[test]
fn test_unknown_enum_variant() {
assert_eq!(
ron::from_str::<TestEnum>("NotAVariant"),
Err(SpannedError {
code: Error::NoSuchEnumVariant {
expected: &["StructVariant", "NewtypeVariant"],
found: String::from("NotAVariant"),
outer: Some(String::from("TestEnum")),
},
position: Position { line: 1, col: 12 },
})
);
}
#[test]
fn test_struct_enum_fields() {
assert_eq!(
ron::from_str::<TestEnum>("StructVariant(a: true, b: 'b', c: -42, d: \"gotcha\")"),
Err(SpannedError {
code: Error::NoSuchStructField {
expected: &["a", "b", "c"],
found: String::from("d"),
outer: Some(String::from("StructVariant")),
},
position: Position { line: 1, col: 41 },
})
);
assert_eq!(
ron::from_str::<TestEnum>("StructVariant(a: true, c: -42)"),
Err(SpannedError {
code: Error::MissingStructField {
field: "b",
outer: Some(String::from("StructVariant")),
},
position: Position { line: 1, col: 30 },
})
);
assert_eq!(
ron::from_str::<TestEnum>("StructVariant(a: true, b: 'b', a: false, c: -42)"),
Err(SpannedError {
code: Error::DuplicateStructField {
field: "a",
outer: Some(String::from("StructVariant")),
},
position: Position { line: 1, col: 33 },
})
);
}
#[test]
fn test_newtype_enum_fields() {
assert_eq!(
ron::from_str::<TestEnum>("#![enable(unwrap_variant_newtypes)] NewtypeVariant(a: true, b: 'b', c: -42, d: \"gotcha\")"),
Err(SpannedError {
code: Error::NoSuchStructField {
expected: &["a", "b", "c"],
found: String::from("d"),
outer: Some(String::from("NewtypeVariant")),
},
position: Position { line: 1, col: 78 },
})
);
assert_eq!(
ron::from_str::<TestEnum>(
"#![enable(unwrap_variant_newtypes)] NewtypeVariant(a: true, c: -42)"
),
Err(SpannedError {
code: Error::MissingStructField {
field: "b",
outer: Some(String::from("NewtypeVariant")),
},
position: Position { line: 1, col: 67 },
})
);
assert_eq!(
ron::from_str::<TestEnum>(
"#![enable(unwrap_variant_newtypes)] NewtypeVariant(a: true, b: 'b', a: false, c: -42)"
),
Err(SpannedError {
code: Error::DuplicateStructField {
field: "a",
outer: Some(String::from("NewtypeVariant")),
},
position: Position { line: 1, col: 70 },
})
);
}
#[test]
fn test_struct_fields() {
assert_eq!(
ron::from_str::<TestStruct>("TestStruct(a: true, b: 'b', c: -42, d: \"gotcha\")"),
Err(SpannedError {
code: Error::NoSuchStructField {
expected: &["a", "b", "c"],
found: String::from("d"),
outer: Some(String::from("TestStruct")),
},
position: Position { line: 1, col: 38 },
})
);
assert_eq!(
ron::from_str::<TestStruct>("TestStruct(a: true, c: -42)"),
Err(SpannedError {
code: Error::MissingStructField {
field: "b",
outer: Some(String::from("TestStruct")),
},
position: Position { line: 1, col: 27 },
})
);
assert_eq!(
ron::from_str::<TestStruct>("TestStruct(a: true, b: 'b', a: false, c: -42)"),
Err(SpannedError {
code: Error::DuplicateStructField {
field: "a",
outer: Some(String::from("TestStruct")),
},
position: Position { line: 1, col: 30 },
})
);
}
#[test]
fn test_internally_tagged_enum() {
// Note: Not extracting the variant type is not great,
// but at least not wrong either
// Since the error occurs in serde-generated user code,
// after successfully deserialising, we cannot annotate
assert_eq!(
ron::from_str::<TestEnumInternal>("(type: \"StructVariant\")"),
Err(SpannedError {
code: Error::MissingStructField {
field: "a",
outer: None,
},
position: Position { line: 1, col: 24 },
})
);
}
#[test]
fn test_adjacently_tagged_enum() {
// Note: TestEnumAdjacent makes sense here since we are now treating
// the enum as a struct
assert_eq!(
ron::from_str::<TestEnumAdjacent>("(type: StructVariant, content: (d: 4))"),
Err(SpannedError {
code: Error::MissingStructField {
field: "a",
outer: Some(String::from("TestEnumAdjacent")),
},
position: Position { line: 1, col: 37 },
})
);
}
#[test]
fn test_untagged_enum() {
// Note: Errors inside untagged enums are not bubbled up
assert_eq!(
ron::from_str::<TestEnumUntagged>("(a: true, a: false)"),
Err(SpannedError {
code: Error::Message(String::from(
"data did not match any variant of untagged enum TestEnumUntagged"
)),
position: Position { line: 1, col: 20 },
})
);
}
| true
|
0a46a788853ff9941800363df3c350c4ae9fa2d3
|
Rust
|
icyJoseph/advent-of-code
|
/2021/day-2/src/main.rs
|
UTF-8
| 1,429
| 3.0625
| 3
|
[] |
no_license
|
use aoc;
fn parse_num<T: std::str::FromStr>(str: &str) -> T {
match str.trim().parse::<T>() {
Ok(n) => n,
_ => panic!("Error parsing"),
}
}
fn solve(raw: String) -> () {
let rows = raw.trim().split("\n").collect::<Vec<&str>>();
let part_one: (u32, u32) = rows.iter().fold((0, 0), |prev, row| {
let command = row.split(" ").collect::<Vec<&str>>();
let dir: &str = command[0];
let step = parse_num::<u32>(command[1]);
match dir {
"forward" => return (prev.0 + step, prev.1),
"down" => return (prev.0, prev.1 + step),
"up" => return (prev.0, prev.1 - step),
_ => panic!("Missing command"),
}
});
println!("Part One: {}", part_one.0 * part_one.1); // 1746616
let part_two = rows.iter().fold((0, 0, 0), |prev, row| {
let command = row.split(" ").collect::<Vec<&str>>();
let dir: &str = command[0];
let step = parse_num::<u32>(command[1]);
match dir {
"forward" => return (prev.0 + step, prev.1 + prev.2 * step, prev.2),
"down" => return (prev.0, prev.1, prev.2 + step),
"up" => return (prev.0, prev.1, prev.2 - step),
_ => panic!("Missing command"),
}
});
println!("Part Two: {}", part_two.0 * part_two.1); // 1741971043
}
fn main() {
let input = aoc::get_input(2021, 2);
solve(input);
}
| true
|
f2352f2ad5c69b3279616b3cbc557635e1e8be27
|
Rust
|
jlgerber/pbgui
|
/pbgui-withs/src/traits.rs
|
UTF-8
| 2,753
| 2.734375
| 3
|
[] |
no_license
|
use qt_widgets::{cpp_core::MutPtr, QFrame, QHBoxLayout, QVBoxLayout, QWidget};
//
// TRAITS
//
pub unsafe trait NewWidget<P, R> {
fn create(parent: &MutPtr<P>) -> MutPtr<R>;
}
unsafe impl NewWidget<QWidget, QWidget> for QWidget {
fn create(parent: &MutPtr<QWidget>) -> MutPtr<QWidget> {
unsafe {
let mut main = QWidget::new_0a();
let main_ptr = main.as_mut_ptr();
let mut parent_ptr = parent.layout();
assert!(!parent_ptr.is_null());
parent_ptr.add_widget(main.into_ptr());
main_ptr
}
}
}
unsafe impl NewWidget<QWidget, QFrame> for QFrame {
fn create(parent: &MutPtr<QWidget>) -> MutPtr<QFrame> {
unsafe {
let mut main = QFrame::new_0a();
let main_ptr = main.as_mut_ptr();
let mut parent_ptr = parent.layout();
assert!(!parent_ptr.is_null());
parent_ptr.add_widget(main.into_ptr());
main_ptr
}
}
}
/// Choose the type of layout that you want to create
/// in the AddLayout trait implementation
#[allow(dead_code)]
pub enum LayoutType {
VBoxLayout,
HBoxLayout,
}
/// Trait provides a function to add a layout to
pub unsafe trait AddLayout<R> {
type Layout;
fn add_layout(&mut self, layout: Self::Layout) -> MutPtr<R>;
}
fn add_layout_to_widget(widget: &mut MutPtr<QWidget>, layout: LayoutType) {
unsafe {
match layout {
LayoutType::VBoxLayout => {
let mut layout = QVBoxLayout::new_0a();
layout.set_margin(0);
layout.set_contents_margins_4a(0, 0, 0, 0);
layout.set_spacing(0);
widget.set_layout(layout.into_ptr());
}
LayoutType::HBoxLayout => {
let mut layout = QHBoxLayout::new_0a();
layout.set_margin(0);
layout.set_contents_margins_4a(0, 0, 0, 0);
layout.set_spacing(0);
widget.set_layout(layout.into_ptr());
}
}
    }
}
unsafe impl AddLayout<QWidget> for MutPtr<QWidget> {
type Layout = LayoutType;
fn add_layout(&mut self, layout: LayoutType) -> MutPtr<QWidget> {
unsafe {
add_layout_to_widget(self, layout);
self.as_mut_ref().unwrap().as_mut_ptr()
}
}
}
unsafe impl AddLayout<QFrame> for MutPtr<QFrame> {
type Layout = LayoutType;
fn add_layout(&mut self, layout: LayoutType) -> MutPtr<QFrame> {
unsafe {
let mut qw: MutPtr<QWidget> = self.static_upcast_mut();
add_layout_to_widget(&mut qw, layout);
self.as_mut_ref().unwrap().as_mut_ptr()
}
}
}
| true
|
d6c06752e6373705ce36099035356ddd47838aa5
|
Rust
|
jtdowney/ray_tracer
|
/src/shape.rs
|
UTF-8
| 4,276
| 3.46875
| 3
|
[] |
no_license
|
mod cone;
mod cube;
mod cylinder;
mod plane;
mod sphere;
use crate::{Intersections, Material, Matrix4, Point, Ray, Vector};
pub use cone::*;
pub use cube::*;
pub use cylinder::*;
pub use plane::*;
pub use sphere::*;
pub trait Shape {
fn transform(&self) -> &Matrix4;
fn set_transform(&mut self, transform: Matrix4);
fn material(&self) -> &Material;
fn set_material(&mut self, material: Material);
fn local_intersect(&self, ray: Ray) -> Intersections;
fn local_normal_at(&self, point: Point) -> Vector;
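    // Default implementation: intersect in object space by pushing the ray
    // through the shape's inverse transform before delegating to
    // `local_intersect`.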
fn intersect(&self, ray: Ray) -> Intersections {
let inverse_transform = self.transform().inverse();
let local_ray = ray.transform(inverse_transform);
self.local_intersect(local_ray)
}
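    // Default implementation: convert the point to object space, take the local
    // normal there, then map it back to world space with the transpose of the
    // inverse transform and normalize the result.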
fn normal_at(&self, point: Point) -> Vector {
let inverse_transform = self.transform().inverse();
let local_point = inverse_transform * point;
let local_normal = self.local_normal_at(local_point);
let world_normal = inverse_transform.transpose() * local_normal;
world_normal.normalize()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{material, point, ray, transformations, vector};
use approx::assert_abs_diff_eq;
use derive_builder::Builder;
use std::{cell::Cell, f64::consts::PI};
fn test_shape() -> TestShape {
TestShapeBuilder::default().build().unwrap()
}
#[derive(Builder)]
struct TestShape {
#[builder(default = "Matrix4::identity()")]
pub transform: Matrix4,
#[builder(default = "material()")]
pub material: Material,
#[builder(setter(skip))]
saved_ray: Cell<Option<Ray>>,
}
impl Shape for TestShape {
fn transform(&self) -> &Matrix4 {
&self.transform
}
fn set_transform(&mut self, _transform: Matrix4) {
todo!()
}
fn material(&self) -> &Material {
&self.material
}
fn set_material(&mut self, _material: Material) {
todo!()
}
fn local_intersect(&self, ray: Ray) -> Intersections {
self.saved_ray.set(Some(ray));
Intersections::empty()
}
fn local_normal_at(&self, point: Point) -> Vector {
vector(point.x, point.y, point.z)
}
}
#[test]
fn default_transformation() {
let s = test_shape();
assert_eq!(s.transform(), &Matrix4::identity());
}
#[test]
fn intersecting_scaled_shape_with_ray() {
let r = ray(point(0.0, 0.0, -5.0), vector(0.0, 0.0, 1.0));
let s = TestShapeBuilder::default()
.transform(transformations::scaling(2.0, 2.0, 2.0))
.build()
.unwrap();
s.intersect(r);
let saved_ray = s.saved_ray.get().unwrap();
assert_abs_diff_eq!(saved_ray.origin, point(0.0, 0.0, -2.5));
assert_abs_diff_eq!(saved_ray.direction, vector(0.0, 0.0, 0.5));
}
#[test]
fn intersecting_translated_shape_with_ray() {
let r = ray(point(0.0, 0.0, -5.0), vector(0.0, 0.0, 1.0));
let s = TestShapeBuilder::default()
.transform(transformations::translation(5.0, 0.0, 0.0))
.build()
.unwrap();
s.intersect(r);
let saved_ray = s.saved_ray.get().unwrap();
assert_abs_diff_eq!(saved_ray.origin, point(-5.0, 0.0, -5.0));
assert_abs_diff_eq!(saved_ray.direction, vector(0.0, 0.0, 1.0));
}
#[test]
fn computing_normal_on_translated_shape() {
let s = TestShapeBuilder::default()
.transform(transformations::translation(0.0, 1.0, 0.0))
.build()
.unwrap();
let n = s.normal_at(point(0.0, 1.70711, -0.70711));
assert_abs_diff_eq!(n, vector(0.0, 0.70711, -0.70711));
}
#[test]
fn computing_normal_on_transformed_shape() {
let s = TestShapeBuilder::default()
.transform(
transformations::scaling(1.0, 0.5, 1.0) * transformations::rotation_z(PI / 5.0),
)
.build()
.unwrap();
let n = s.normal_at(point(0.0, f64::sqrt(2.0) / 2.0, -f64::sqrt(2.0) / 2.0));
assert_abs_diff_eq!(n, vector(0.0, 0.97014, -0.24254));
}
}
| true
|
3c36314aa5dc6cbe8a4816a8ec82fffc24f78ebf
|
Rust
|
tomitheninja/project-rust
|
/p012/src/main.rs
|
UTF-8
| 1,019
| 3.953125
| 4
|
[] |
no_license
|
//! The sequence of triangle numbers is generated by adding the natural numbers.
//! So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
//! The first ten terms would be:
//!
//! 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
//!
//! [...] We can see that 28 is the first triangle number to have over five divisors.
//!
//! What is the value of the first triangle number to have over five hundred divisors?
extern crate factors;
extern crate triangular_number;
use factors::Factorable;
use triangular_number::TriangularNumber;
fn main() {
println!("{}", solve(500));
}
/// The first triangular with number with more than `n` factors
fn solve(n: usize) -> u64 {
TriangularNumber::new()
.find(|&num| Factorable::new(num).num_factors() > n)
.unwrap()
}
#[cfg(test)]
mod p012 {
use super::solve;
#[test]
fn over_five_divisors() {
assert_eq!(28, solve(5));
}
#[test]
fn over_five_hundred_divisors() {
assert_eq!(76576500, solve(500));
}
}
| true
|
0be417e029759542b5643e4137bff3779d7aa805
|
Rust
|
lemonrock/simple-http-server
|
/workspace/simple-http-server-vectored-buffers/src/VectoredBuffer.rs
|
UTF-8
| 7,661
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
// This file is part of simple-http-server. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/simple-http-server/master/COPYRIGHT. No part of simple-http-server, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2018 The developers of simple-http-server. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/simple-http-server/master/COPYRIGHT.
/// A vectored buffer, ie one consisting of one or more individual buffers.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct VectoredBuffer
{
buffers: RingBuffer<BufferReference>,
allocation_observer_identifier: AllocationObserverIdentifier,
}
impl<'b> VectoredBuffer
{
/// Use instead of `::std::io::Read.read()`.
///
/// Returns a tuple of `(bytes_read, next_offset)`.
///
/// If the `VectoredBuffer` is full, `next_offset` will be `None`.
#[inline(always)]
pub fn read_into(&self, read: &mut impl Read, starting_at: VectoredBufferOffset) -> io::Result<(usize, Option<VectoredBufferOffset>)>
{
let mut buffer_index = starting_at.buffer_index;
let offset = starting_at.offset;
		let bytes_read =
		{
			let mut mutable_buffer = self.get_mutable_buffer_with_offset(buffer_index, offset);
			let bytes_read = read.read(&mut mutable_buffer)?;
			if likely!(bytes_read != mutable_buffer.len())
			{
				return Ok((bytes_read, Some(VectoredBufferOffset::new(buffer_index, offset + bytes_read))))
			}
			bytes_read
		};
buffer_index.increment_unchecked();
let mut total_bytes_read = bytes_read;
while self.is_valid_buffer_index(buffer_index)
{
			let mut mutable_buffer = self.get_mutable_buffer(buffer_index);
			let bytes_read = read.read(&mut mutable_buffer)?;
			total_bytes_read += bytes_read;
			if likely!(bytes_read != mutable_buffer.len())
{
return Ok((total_bytes_read, Some(VectoredBufferOffset::new(buffer_index, bytes_read))))
}
buffer_index.increment_unchecked();
}
Ok((total_bytes_read, None))
}
/// Use instead of `::std::io::Write.write()`.
///
/// Returns a tuple of `(bytes_written, next_offset)`.
///
/// If the `VectoredBuffer` has nothing more to write, `next_offset` will be `None`.
#[inline(always)]
pub fn write_from(&self, write: &mut impl Write, starting_at: VectoredBufferOffset) -> io::Result<(usize, Option<VectoredBufferOffset>)>
{
let mut buffer_index = starting_at.buffer_index;
let offset = starting_at.offset;
		let immutable_buffer = self.get_immutable_buffer_with_offset(buffer_index, offset);
		let bytes_written = write.write(&immutable_buffer)?;
		if likely!(bytes_written != immutable_buffer.len())
{
return Ok((bytes_written, Some(VectoredBufferOffset::new(buffer_index, offset + bytes_written))))
}
buffer_index.increment_unchecked();
let mut total_bytes_written = bytes_written;
while self.is_valid_buffer_index(buffer_index)
{
			let immutable_buffer = self.get_immutable_buffer(buffer_index);
			let bytes_written = write.write(&immutable_buffer)?;
			total_bytes_written += bytes_written;
			if likely!(bytes_written != immutable_buffer.len())
{
return Ok((total_bytes_written, Some(VectoredBufferOffset::new(buffer_index, bytes_written))))
}
buffer_index.increment_unchecked();
}
Ok((total_bytes_written, None))
}
#[inline(always)]
pub(crate) fn get_immutable_buffer_with_offset(&self, buffer_index: BufferIndex, offset: InclusiveFromOffset) -> Ref<'b, [u8]>
{
let immutable_buffer = self.get_immutable_buffer(buffer_index);
		Ref::map(immutable_buffer, |immutable_buffer| &immutable_buffer[offset .. ])
}
#[inline(always)]
fn get_immutable_buffer(&self, buffer_index: BufferIndex) -> Ref<'b, [u8]>
{
self.buffers.borrow(buffer_index)
}
#[inline(always)]
fn get_mutable_buffer_with_offset(&self, buffer_index: BufferIndex, offset: InclusiveFromOffset) -> RefMut<'b, [u8]>
{
let mutable_buffer = self.get_mutable_buffer(buffer_index);
		RefMut::map(mutable_buffer, |mutable_buffer| &mut mutable_buffer[offset .. ])
}
#[inline(always)]
fn get_mutable_buffer(&self, buffer_index: BufferIndex) -> RefMut<'b, [u8]>
{
self.buffers.borrow_mut(buffer_index)
}
}
impl VectoredBuffer
{
/// Creates a new instance and gives it an initial buffer from `buffer_arena`.
///
/// The `buffer_arena` is not stored with the `VectoredBuffer`, allowing one to use buffers from different arenas.
///
/// The `allocation_observer_identifier` is usually a HTTP connection identifier, and can be used, along with the AllocationObserver in BufferArena, to prevent greedy use of buffers by a particular connection.
///
/// For instance `allocation_observer_identifier` could transmute into a reference to a struct which hold a count of how many buffers have been allocated for a connection.
///
/// It could also double as a `mio` `Token`.
///
/// On AMD 64-bit systems, the lowest 3 bits and highest 12 bits can also be used as a packed pointer.
#[inline(always)]
pub fn new(allocation_observer_identifier: AllocationObserverIdentifier, buffer_arena: &Arc<BufferArena<impl AllocationObserver>>) -> Result<(Self, BufferIndex), ()>
{
let mut this = Self
{
buffers: RingBuffer::new(),
allocation_observer_identifier,
};
let buffer_index = this.allocate_another_buffer(buffer_arena)?;
Ok((this, buffer_index))
}
/// Allocates another buffer.
///
/// Returns an Err if:-
///
/// * The underlying ring buffer for this VectoredBuffer is full;
/// * There are no buffers left to allocate (effectively out of memory in the chosen `buffer_arena`);
///
/// Otherwise returns the `BufferIndex` of the allocated buffer.
#[inline(always)]
pub fn allocate_another_buffer(&mut self, buffer_arena: &Arc<BufferArena<impl AllocationObserver>>) -> Result<BufferIndex, ()>
{
if self.buffers.is_full()
{
return Err(())
}
match BufferArena::allocate(buffer_arena, self.allocation_observer_identifier)
{
None => Err(()),
Some(buffer_reference) => Ok(self.buffers.insert_unchecked(buffer_reference)),
}
}
/// Retires (drops) a buffer if there is more than one.
///
/// In practice, a buffer is only freed when all `BufferReference`s to it are dropped.
///
/// Retiring a buffer creates space in the VectoredBuffer for more buffers to be added.
///
/// If there is only one buffer remaining, it will not retire it.
///
/// Returns `true` while there are still more buffers that could be retired.
pub fn retire_oldest_buffer(&mut self) -> bool
{
if unlikely!(self.buffers.length() <= 1)
{
return false
}
		self.buffers.remove_unchecked();
		
		// Per the documented contract, report whether more buffers could still be retired.
		self.buffers.length() > 1
}
/// Recycles a buffer if there is more than one, taking the oldest buffer and making it the youngest.
///
/// Returns the buffer index of the inserted buffer.
///
/// This is equivalent to `retire_oldest_buffer()` followed by `allocate_another_buffer()` but much more efficient.
///
/// However, if some other code holds a reference to this buffer, and the contents are subsequently over-written, that code will now be looking at invalid memory.
pub fn recycle_oldest_buffer(&mut self) -> Option<BufferIndex>
{
if unlikely!(self.buffers.length() <= 1)
{
return None
}
let buffer_reference = self.buffers.remove_unchecked_and_undropped();
let buffer_index = self.buffers.insert_unchecked(buffer_reference);
Some(buffer_index)
}
/// Is `buffer_index` valid?
#[inline(always)]
pub fn is_valid_buffer_index(&self, buffer_index: BufferIndex) -> bool
{
buffer_index != self.number_of_buffers()
}
/// Number of buffers.
#[inline(always)]
pub fn number_of_buffers(&self) -> usize
{
self.buffers.length()
}
}
| true
|
0223df6794c841dfdb32defac9a55538c131c16b
|
Rust
|
HadrienG2/bacara
|
/src/lib.rs
|
UTF-8
| 36,294
| 3.234375
| 3
|
[] |
no_license
|
//! This crate provides a Bitmap Allocator for Concurrent Applications with
//! Real-time Activities. You can use it for your dynamic memory allocation
//! needs in those real-time threads where the system memory allocator should
//! not be used because of its unpredictable execution time.
//!
//! The implementation is both thread-safe and real-time-safe: after
//! construction (which is RT-unsafe), you can use this allocator to allocate
//! and liberate memory from multiple RT threads, and none of them will acquire
//! a lock or call into any operating system facility in the process.
//!
//! In terms of progress guarantee, we guarantee lock-freedom but not
//! wait-freedom: no thread can prevent other threads from making progress by
//! freezing or crashing at the wrong time, but a thread hammering the allocator
//! in a tight loop can slow down other threads concurrently trying to allocate
//! memory to an unpredictable degree. As long as you keep use of this allocator
//! reasonably infrequent, this shouldn't be a problem in practice.
//!
//! In the absence of such heavy concurrent interference, worst-case execution
//! times grow linearly with the allocator's bitmap size (see below).
//!
//! # Bitmap allocation primer
//!
//! A bitmap allocator is a general-purpose memory allocator: it allows
//! allocating variable-sized buffers from its backing store, and later on
//! deallocating them individually and in any order.
//!
//! This allocation algorithm works by dividing the buffer of memory that it is
//! managing (which we'll call **backing store**) into evenly sized **blocks**,
//! and tracking which blocks are in use using an array of bits, a **bitmap**.
//!
//! Allocation is done by scanning the bitmap for a suitably large hole
//! (continuous sequence of zeroes), filling that hole with ones, and mapping
//! the hole's index in the bitmap into a pointer within the backing store.
//! Deallocation is done by mapping back from the user-provided pointer to a
//! range of indices within the bitmap and resetting those bits to zero.
//!
//! The **block size** is the most important tuning parameter of a bitmap
//! allocator, and should be chosen wisely:
//!
//! - Because the allocation overhead and bitmap size are proportional to the
//! number of blocks managed by the allocator, the CPU and memory overhead of
//! a bitmap allocator will grow as its block size shrinks. From this
//! perspective, using the highest block size you can get away with is best.
//! - But since allocations are tracked with block granularity, higher block
//! sizes mean less efficient use of the backing store, as the allocator is
//! more likely to allocate more memory than the client needs.
//!
//! Furthermore, practical implementations of bitmap allocation on modern
//! non-bit-addressable hardware will reach their peak CPU efficiency when
//! processing allocation requests whose size is an implementation-defined
//! multiple of the block size, which we will refer to as a **superblock**.
//! Depending on your requirements, you may want to tune superblock size rather
//! than block size, which is why our API will allow you to do both.
//!
//! You should tune your (super)block size based on the full range of envisioned
//! allocation workloads, and even consider instantiating multiple allocators
//! with different block sizes if your allocation patterns vary widely, because
//! a block size that is a good compromise for a given allocation pattern may be
//! a less ideal choice for another allocation pattern.
//!
//! # Example
//!
//! FIXME: Oh yes I do need those, but API must be done first ;)
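//!
//! In the meantime, here is a minimal usage sketch built from the `Builder` and
//! the `alloc_unbound` / `dealloc_unbound` methods defined below (marked
//! `ignore` because it is illustrative only, not a tested doctest; the parameter
//! values are arbitrary):
//!
//! ```ignore
//! let allocator = Allocator::builder()
//!     .alignment(8)
//!     .block_size(64)
//!     .capacity(64 * Allocator::BLOCKS_PER_SUPERBLOCK)
//!     .build();
//!
//! // Allocate 256 bytes, use them, then hand them back.
//! if let Some(buffer) = allocator.alloc_unbound(256) {
//!     // ... fill and read the buffer ...
//!     unsafe { allocator.dealloc_unbound(buffer) };
//! }
//! ```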
mod allocation;
mod bitmap;
mod builder;
mod hole;
use crate::{bitmap::AtomicSuperblockBitmap, hole::HoleSearch};
#[cfg(test)]
use require_unsafe_in_body::require_unsafe_in_bodies;
use std::{
alloc::{self, Layout},
mem::{self, ManuallyDrop, MaybeUninit},
ptr::NonNull,
sync::atomic::{self, Ordering},
};
// Re-export allocator builder at the crate root
pub use builder::Builder as AllocatorBuilder;
// Re-export some building blocks for other modules' use
pub(crate) use crate::{bitmap::SuperblockBitmap, hole::Hole};
/// Number of blocks in a superblock
///
/// This is what's publicly exposed as Allocator::BLOCKS_PER_SUPERBLOCK, but
/// it's also internally exposed as a module-level const so that it's shorter
/// and can be brought into scope with "use".
pub(crate) const BLOCKS_PER_SUPERBLOCK: usize = mem::size_of::<SuperblockBitmap>() * 8;
/// A thread-safe bitmap allocator
pub struct Allocator {
/// Beginning of the backing store from which we'll be allocating memory
///
/// Guaranteed by `Builder` to contain an integer number of superblocks,
/// each of which maps into one `AtomicUsize` in usage_bitmap.
backing_store_start: NonNull<MaybeUninit<u8>>,
/// Bitmap tracking usage of the backing store's storage blocks
///
/// The backing store is divided into blocks of `Self::block_size()` bytes,
/// and each bit of this bitmap tracks whether a block is allocated or free,
/// in backing store order.
///
/// Because modern CPUs are not bit-addressable, we must manipulate our bits
/// in bunches, via unsigned integers. This leads to the creation of a new
/// artificial storage granularity, tracked by a full integer-sized bunch of
/// bits in the bitmap, which we call a superblock.
usage_bitmap: Box<[AtomicSuperblockBitmap]>,
/// Bitshift-based representation of the block size
///
/// This odd value storage is not a space optimization, but a way to tell
/// the compiler's optimizer that the block size has to be a power of two so
/// that it optimizes our integer divisions and remainders. Please use
/// methods like block_size() to query the block size.
block_size_shift: u8,
/// Backing store alignment (and thus storage block alignment)
///
/// We must keep track of this because Rust's allocator API will expect us
/// to give back this information upon deallocating the backing store.
alignment: usize,
/// Memory locks that we need to release before deallocating memory on Drop
locks: ManuallyDrop<Box<[region::LockGuard]>>,
}
// `require_unsafe_in_bodies` is only enabled on development builds because it
// requires procedural macros and that adds a lot to crate build times.
#[cfg_attr(test, require_unsafe_in_bodies)]
impl Allocator {
/// Start building an allocator
///
/// See the `Builder` documentation for more details on the subsequent
/// allocator configuration process.
pub const fn builder() -> AllocatorBuilder {
AllocatorBuilder::new()
}
/// Allocator constructor proper, without invariant checking
///
/// This method mostly exists as an implementation detail of `Builder`, and
/// there is no plan to make it public at the moment since I cannot think of
/// a single reason to do so. You're not really building Allocators in a
/// tight loop, are you?
///
/// # Safety
///
/// The block_align, block_size and capacity parameters may be assumed to
/// uphold all the preconditions listed as "must" bullet points in the
/// corresponding `Builder` struct members' documentation, either in this
/// constructor or other methods of Allocator.
#[cfg_attr(not(test), allow(unused_unsafe))]
pub(crate) unsafe fn new_unchecked(
block_align: usize,
block_size: usize,
capacity: usize,
) -> Self {
// Allocate the backing store
//
// This is safe because we've checked all preconditions of `Layout`
// and `alloc()` during the `Builder` construction process, including
// the fact that capacity is not zero which is the one thing that makes
// `alloc::alloc()` unsafe.
let backing_store_layout = Layout::from_size_align(capacity, block_align)
.expect("All Layout preconditions should have been checked");
let backing_store_start = NonNull::new(unsafe { alloc::alloc(backing_store_layout) })
.unwrap_or_else(|| alloc::handle_alloc_error(backing_store_layout))
.cast::<MaybeUninit<u8>>();
// Build the usage-tracking bitmap
let superblock_size = block_size * BLOCKS_PER_SUPERBLOCK;
let mut usage_bitmap = std::iter::repeat(SuperblockBitmap::EMPTY)
.map(AtomicSuperblockBitmap::new)
.take(capacity / superblock_size)
.collect::<Box<[_]>>();
// Try to force the underlying operating system to keep our owned memory
// allocations into RAM, instead of allowing its usual virtual memory
// tricks that can lead memory reads and writes to become RT-unsafe.
let locks = ManuallyDrop::new(
[
(
backing_store_start.as_ptr().cast::<u8>(),
capacity,
"backing store",
),
(
usage_bitmap.as_mut_ptr().cast::<u8>(),
usage_bitmap.len() * mem::size_of::<SuperblockBitmap>(),
"usage bitmap",
),
]
.iter()
.flat_map(|&(start, size, name)| {
region::lock(start, size).map_err(|err| {
// I'd like to use a proper logger here, but I
// intend to use this allocator in a Log impl...
if cfg!(debug_assertions) {
eprintln!("WARNING: Failed to lock {} memory: {}", name, err);
}
})
})
.collect::<Box<[_]>>(),
);
// Build and return the allocator struct
Allocator {
backing_store_start,
usage_bitmap,
block_size_shift: block_size.trailing_zeros() as u8,
alignment: block_align,
locks,
}
}
/// Implementation-defined number of storage blocks per superblock
///
/// This is the multiplicative factor between a bitmap allocator's block
/// size and its superblock size. That quantity is machine-dependent and
/// subjected to change in future versions of this crate, so please always
/// use this constant instead of relying on past values from it.
pub const BLOCKS_PER_SUPERBLOCK: usize = BLOCKS_PER_SUPERBLOCK;
/// Block alignment of this allocator (in bytes)
///
/// Every block managed by the allocator is guaranteed to have this
/// memory alignment "for free".
pub const fn block_alignment(&self) -> usize {
self.alignment
}
/// Block size of this allocator (in bytes)
///
/// This is the granularity at which the allocator's internal bitmap tracks
/// which regions of the backing store are used and unused.
pub const fn block_size(&self) -> usize {
1 << (self.block_size_shift as u32)
}
/// Superblock size of this allocator (in bytes)
///
/// This is the allocation granularity at which this allocator should
/// exhibit optimal CPU performance.
pub const fn superblock_size(&self) -> usize {
self.block_size() * BLOCKS_PER_SUPERBLOCK
}
/// Size of this allocator's backing store (in bytes)
///
/// This is the maximal amount of memory that may be allocated from this
/// allocator, assuming no memory waste due to unused block bytes and no
/// fragmentation issues.
pub fn capacity(&self) -> usize {
self.usage_bitmap.len() * self.superblock_size()
}
// TODO: Add a safe `alloc_bound` API based on a Box-ish abstraction that
// auto-deallocates and auto-derefs, guaranteed non-dangling by the
// use of a lifetime-based API.
//
// Warn that it has a performance cost (need a back-reference to the
// home Allocator) and an ergonomics cost (hard to store allocator +
    //       allocations together as that's a self-referential struct).
/// Allocate a memory buffer, without borrow checking
///
/// Returns a pointer to the allocated slice if successful, or `None` if the
/// allocator is not able to satisfy this request because not enough
/// contiguous storage is available in its backing store.
///
/// This function is not unsafe per se, in the sense that no undefined
/// behavior can occur as a direct result of calling it. However, you should
/// really make sure that the output buffer pointer is passed back to
/// `Allocator::dealloc_unbound()` before the allocator is dropped.
///
/// Otherwise, the pointer will be invalidated, and dereferencing it after
/// that _will_ unleash undefined behavior.
//
// TODO: Prepare support for overaligned allocations and `GlobalAlloc` impl
// by accepting `std::alloc::Layout`. Initially, we can just return
// None when the requested alignment is higher than self.alignment.
pub fn alloc_unbound(&self, size: usize) -> Option<NonNull<[MaybeUninit<u8>]>> {
// Detect and report unrealistic requests in debug builds
debug_assert!(
size < self.capacity(),
"Requested size is above allocator capacity"
);
// Handle the zero-sized edge case
//
// TODO: We'll need to revise this if we ever allow overaligned allocs.
if size == 0 {
return Some(
// This is safe because...
// - The backing store pointer is obviously valid for 0 elements
// - It has the minimal alignment we promise to always offer
// - Lifetimes don't matter as we're building a raw pointer
// - We won't overflow isize with a zero-length slice
// - &mut aliasing is not an issue for zero-sized slices.
unsafe { std::slice::from_raw_parts_mut(self.backing_store_start.as_ptr(), 0) }
.into(),
);
}
// Convert requested size to a number of requested blocks
let num_blocks = div_round_up(size, self.block_size());
// Start looking for suitable free memory "holes" in the usage bitmap
//
// TODO: Always starting at the beginning isn't nice because we keep
// hammering the same first superblocks while latter ones are
// most likely to be free. Consider adding a state variable to
// pick up at the superblock which we left off. It's just a
// perf hint so atomic loads/stores should be enough.
//
// We could go one step further and store both a superblock
// index and a block index within that variable, via some
// bit-packing trick : |superblock_idx|block_idx|. But this
// puts an implementation limitation on the amount of
// superblocks which we can track and in the end we're still
// hitting the same superblock, so it may not be worthwhile.
//
// We should update this state as quickly as possible, so that
// other threads move away from the hole that we're looking
// at, but not too often as otherwise the state variable would
// be hammered too hard.
//
// Random index selection isn't a good strategy here as it
// will lead to fragmentation, which will make large
// allocations fail all the time.
//
// Generally speaking, this shouldn't be investigated before
// we have benchmarks, first because the perf tradeoffs are
// subtle and second because it will split the bitmap in two
// halves that join between the end of the second half and the
// beginning of the first half, and the complexity must be
// justified by some proven substantial perf benefit.
let (mut hole_search, mut hole) = HoleSearch::new(
num_blocks,
self.usage_bitmap
.iter()
.map(|asb| asb.load(Ordering::Relaxed)),
);
// Try to allocate the current hole, retry on failure.
let first_block_idx = loop {
match allocation::try_alloc_hole(self, hole?, num_blocks) {
Ok(first_block_idx) => break first_block_idx,
Err((superblock_idx, observed_bitmap)) => {
hole = hole_search.retry(superblock_idx, observed_bitmap);
continue;
}
}
};
// Make sure that the previous reads from the allocation bitmap are
// ordered before any subsequent access to the buffer by the current
// thread, to avoid data races with the thread that deallocated the
// memory that we are in the process of allocating.
atomic::fence(Ordering::Acquire);
// Translate our allocation's first block index into an actual memory
// address within the allocator's backing store. This is safe because...
//
// - The pointer has to be in bounds, since we got the block coordinates
// from the usage bitmap and by construction, there are no
// out-of-bounds blocks in the bitmap (remember that we rounded the
// requested allocation size to a multiple of the superblock size).
// - By construction, backing store capacity cannot be above
// `isize::MAX`, so in-bounds offsets shouldn't go above that limit.
// - Since we're targeting an allocation that we got from the system
// allocator, the address computation shouldn't overflow usize and
// wrap around.
let target_start = unsafe {
self.backing_store_start
.as_ptr()
.add(first_block_idx * self.block_size())
};
// Add requested length (_not_ actual allocation length) to turn this
// start-of-allocation pointer into an allocated slice. This is safe
// to do because...
//
// - If our allocation algorithm is correct, no one else currently holds
// a slice to this particular subset of the backing store.
// - If our allocation algorithm is correct, "size" bytes are in bounds
// - Lifetimes don't matter as we'll just turn this into a pointer
// - The backing store pointer cannot be null because the constructor
// aborts if allocation fails by returning a null pointer.
// - There is no alignment problem as we're building a NonNull<u8>
// - "size" cannot overflow isize because the backing store capacity is
// not allowed to do so.
let target_slice = unsafe { std::slice::from_raw_parts_mut(target_start, size) };
// Finally, we can build and return the output pointer
NonNull::new(target_slice as *mut _)
}
// TODO: Add a realloc_unbound() API, and a matching realloc() method to the
// higher-level buffer object returned by alloc_bound()
/// Deallocate a buffer that was previously allocated via `alloc_unbound`
///
/// # Safety
///
/// `ptr` must denote a block of memory currently allocated via this
/// allocator, i.e. it must have been generated by `alloc_unbound` without
/// further tampering and it should not have already been deallocated.
///
/// `ptr` will be dangling after calling this function, and should neither
/// be dereferenced nor passed to `dealloc_unbound` again. All references to
/// the target memory region must have been dropped before calling this API.
#[cfg_attr(not(test), allow(unused_unsafe))]
pub unsafe fn dealloc_unbound(&self, ptr: NonNull<[MaybeUninit<u8>]>) {
// In debug builds, check that the input pointer does come from our
// backing store, with all the properties that one would expect.
let ptr_start = ptr.cast::<MaybeUninit<u8>>().as_ptr();
let store_start = self.backing_store_start.as_ptr();
debug_assert!(
ptr_start >= store_start,
"Deallocated ptr starts before backing store"
);
let ptr_offset = (ptr_start as usize) - (store_start as usize);
debug_assert!(
ptr_offset < self.capacity(),
"Deallocated ptr starts after backing store"
);
debug_assert_eq!(
ptr_offset % self.block_size(),
0,
"Deallocated ptr doesn't start on a block boundary"
);
// Check pointer length as well. This is safe because we requested a
// valid pointer as part of this function's safety preconditions.
let ptr_len = unsafe { ptr.as_ref() }.len();
debug_assert!(
ptr_len < self.capacity() - ptr_offset,
"Deallocated ptr overflows backing store"
);
// NOTE: ptr_len may not be a multiple of the block size because we
// allow users to under-allocate blocks
// Do not do anything beyond that for zero-sized allocations, by
// definition they have no associated storage block to be freed
if ptr_len == 0 {
return;
}
// Make sure that the subsequent writes to the allocation bitmap are
// ordered after any previous access to the buffer by the current
// thread, to avoid data races with other threads concurrently
// reallocating and filling the memory that we are liberating.
atomic::fence(Ordering::Release);
// Switch to block coordinates and call the deallocator
//
// This is safe if the conversion between pointer+len and block
// coordinates is correct, because our safety contract requires that the
// input pointer is a good candidate for deallocation.
let start_idx = ptr_offset / self.block_size();
let num_blocks = div_round_up(ptr_len, self.block_size());
unsafe {
allocation::dealloc_blocks(self, start_idx, num_blocks);
}
}
/// Try to atomically allocate a full superblock
///
/// Returns observed superblock allocation bitfield on failure.
///
/// This operation has `Relaxed` memory ordering and must be followed by an
/// `Acquire` memory barrier in order to avoid allocation being reordered
/// after usage of the memory block by the compiler or CPU.
pub(crate) fn try_alloc_superblock(
&self,
superblock_idx: usize,
) -> Result<(), SuperblockBitmap> {
debug_assert!(
superblock_idx < self.usage_bitmap.len(),
"Superblock index is out of bitmap range"
);
self.usage_bitmap[superblock_idx].try_alloc_all(Ordering::Relaxed, Ordering::Relaxed)
}
/// Try to atomically allocate a sequence of blocks within a superblock
///
/// Returns observed superblock allocation bitfield on failure.
///
/// This operation has `Relaxed` memory ordering and must be followed by an
/// `Acquire` memory barrier in order to avoid allocation being reordered
/// after usage of the memory block by the compiler or CPU.
pub(crate) fn try_alloc_mask(
&self,
superblock_idx: usize,
mask: SuperblockBitmap,
) -> Result<(), SuperblockBitmap> {
debug_assert!(
superblock_idx < self.usage_bitmap.len(),
"Superblock index is out of bitmap range"
);
self.usage_bitmap[superblock_idx].try_alloc_mask(mask, Ordering::Relaxed, Ordering::Relaxed)
}
/// Atomically deallocate a (right-exclusive) range of superblocks
///
/// This operation has `Relaxed` memory ordering and must be preceded by a
/// `Release` memory barrier in order to avoid deallocation being reordered
/// before usage of the memory block by the compiler or CPU.
///
/// # Safety
///
/// This function must not be targeted at a superblock which is still in
/// use, either partially or entirely, otherwise many forms of undefined
/// behavior will occur (&mut aliasing, race conditions, double-free...).
pub(crate) unsafe fn dealloc_superblocks(
&self,
start_superblock_idx: usize,
end_superblock_idx: usize,
) {
debug_assert!(
start_superblock_idx < self.usage_bitmap.len(),
"First superblock index is out of bitmap range"
);
debug_assert!(
end_superblock_idx <= self.usage_bitmap.len(),
"Last superblock index is out of bitmap range"
);
for superblock in &self.usage_bitmap[start_superblock_idx..end_superblock_idx] {
superblock.dealloc_all(Ordering::Relaxed);
}
}
/// Atomically deallocate a sequence of blocks within a superblock
///
/// This operation has `Relaxed` memory ordering and must be preceded by a
/// `Release` memory barrier in order to avoid deallocation being reordered
/// before usage of the memory block by the compiler or CPU.
///
/// # Safety
///
    /// This function must not be targeted at blocks which are still in
/// use, either partially or entirely, otherwise many forms of undefined
/// behavior will occur (&mut aliasing, race conditions, double-free...).
pub(crate) unsafe fn dealloc_mask(&self, superblock_idx: usize, mask: SuperblockBitmap) {
debug_assert!(
superblock_idx < self.usage_bitmap.len(),
"Superblock index is out of bitmap range"
);
self.usage_bitmap[superblock_idx].dealloc_mask(mask, Ordering::Relaxed);
}
}
impl Drop for Allocator {
// I disagree with clippy here because I do not use
// `AtomicSuperblock::get_mut` to write data, but only to read it.
#[allow(clippy::debug_assert_with_mut_call)]
fn drop(&mut self) {
// Make sure that no storage blocks were allocated, as the corresponding
// pointers will become dangling when the allocator is dropped.
debug_assert!(
self.usage_bitmap
.iter_mut()
.all(|bits| *bits.get_mut() == SuperblockBitmap::EMPTY),
"Allocator was dropped while there were still live allocations"
);
// If owned storage allocations were successfully locked, unlock them.
// This is safe because we called it before deallocating anything and
// we're not going to use self.locks afterwards.
unsafe { ManuallyDrop::drop(&mut self.locks) };
// Deallocate the backing store. This is safe because...
// - An allocator is always created with a backing store allocation
// - Only Drop, which happens at most once, can liberate that allocation
// - The layout matches that used in `Allocator::new_unchecked()`
let backing_store_layout = Layout::from_size_align(self.capacity(), self.alignment)
.expect("All Layout preconditions were checked by builder");
unsafe {
alloc::dealloc(
self.backing_store_start.cast::<u8>().as_ptr(),
backing_store_layout,
);
}
}
}
// TODO: Implement GlobalAlloc trait? Will require lazy_static/OnceCell until
// Builder can be made const fn, but people might still find it useful for
// certain use cases...
//
// To allow this, I must not use the names alloc(), dealloc(),
// alloc_zeroed() and realloc() in the inherente Allocator API.
//
// GlobalAlloc methods must not unwind. Since pretty much everything can
// panic in Rust and I love assertions, I'll probably want to use
// catch_unwind, then return a null pointer if possible and call
// alloc::handle_alloc_error myself otherwise.
//
// Obviously, GlobalAlloc layout expectations must also be upheld,
// including alignment. Until I support overaligned allocations (if ever),
// this will entail erroring out when requested alignment is higher than
// global block alignment.
//
// If I do this, I should mention it in the crate documentation, along
// with the fact that it's only suitable for specific use cases (due to
// limited capacity, and possibly no overalignment ability)
/// Small utility to divide two integers, rounding the result up
fn div_round_up(x: usize, y: usize) -> usize {
// Check interface preconditions in debug builds
debug_assert!(y != 0, "Attempted to divide by zero");
// Return rounded division result
(x / y) + (x % y != 0) as usize
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn div_round_up_() {
assert_eq!(div_round_up(0, 1), 0);
assert_eq!(div_round_up(1, 1), 1);
assert_eq!(div_round_up(2, 1), 2);
assert_eq!(div_round_up(0, 2), 0);
assert_eq!(div_round_up(1, 2), 1);
assert_eq!(div_round_up(2, 2), 1);
assert_eq!(div_round_up(3, 2), 2);
assert_eq!(div_round_up(4, 2), 2);
assert_eq!(div_round_up(0, 3), 0);
assert_eq!(div_round_up(1, 3), 1);
assert_eq!(div_round_up(2, 3), 1);
assert_eq!(div_round_up(3, 3), 1);
assert_eq!(div_round_up(4, 3), 2);
assert_eq!(div_round_up(5, 3), 2);
assert_eq!(div_round_up(6, 3), 2);
}
#[test]
fn builder() {
assert_eq!(Allocator::builder(), AllocatorBuilder::new());
// NOTE: `AllocatorBuilder` is tested in builder.rs
}
#[test]
fn initial_state() {
for alignment in [1, 2, 4, 8].iter().copied() {
'bs: for block_size in [1, 2, 4, 8, 16, 256, 1024, 4096, 8192].iter().copied() {
if block_size < alignment {
continue 'bs;
}
let superblock_size = block_size * BLOCKS_PER_SUPERBLOCK;
for num_superblocks in [1, 2, 3, 4, 5, 6, 7, 8].iter().copied() {
let capacity = num_superblocks * superblock_size;
let mut allocator = Allocator::builder()
.alignment(alignment)
.block_size(block_size)
.capacity(capacity)
.build();
assert_eq!(allocator.block_alignment(), alignment);
assert_eq!(allocator.block_size(), block_size);
assert_eq!(allocator.superblock_size(), superblock_size);
assert_eq!(allocator.capacity(), capacity);
let start_address = allocator.backing_store_start.as_ptr() as usize;
assert!(start_address >= region::page::size());
assert_eq!(start_address % alignment, 0);
assert_eq!(allocator.usage_bitmap.len(), num_superblocks);
// TODO: Deduplicate this recurring check
assert!(allocator
.usage_bitmap
.iter_mut()
.map(|asb| *asb.get_mut())
.all(|sb| sb == SuperblockBitmap::EMPTY));
assert_eq!(
allocator.block_size_shift,
block_size.trailing_zeros() as u8
);
assert_eq!(allocator.alignment, alignment);
}
}
}
}
// NOTE: No need to test accessors outside of initial_state because
// Allocator methods only allow modifying the usage_bitmap.
#[test]
fn blocks_per_superblock() {
assert_eq!(BLOCKS_PER_SUPERBLOCK, Allocator::BLOCKS_PER_SUPERBLOCK);
}
#[test]
fn superblock_allocs() {
let allocator = Allocator::builder()
.alignment(8)
.block_size(64)
.capacity(3 * 64 * BLOCKS_PER_SUPERBLOCK)
.build();
for idx in 0..allocator.usage_bitmap.len() {
assert_eq!(allocator.try_alloc_superblock(idx), Ok(()));
let check_allocated = |curr_bitmap| {
for (idx2, asb) in allocator.usage_bitmap.iter().enumerate() {
let superblock = asb.load(Ordering::Relaxed);
if idx2 == idx {
assert_eq!(superblock, curr_bitmap);
} else {
assert_eq!(superblock, SuperblockBitmap::EMPTY);
}
}
};
check_allocated(SuperblockBitmap::FULL);
assert_eq!(
allocator.try_alloc_superblock(idx),
Err(SuperblockBitmap::FULL)
);
check_allocated(SuperblockBitmap::FULL);
for mask2_start in 0..BLOCKS_PER_SUPERBLOCK {
for mask2_len in 1..=(BLOCKS_PER_SUPERBLOCK - mask2_start.max(1)) {
let mask2 = SuperblockBitmap::new_mask(mask2_start as u32, mask2_len as u32);
assert_eq!(
allocator.try_alloc_mask(idx, mask2),
Err(SuperblockBitmap::FULL)
);
check_allocated(SuperblockBitmap::FULL);
}
}
unsafe {
allocator.dealloc_superblocks(idx, idx + 1);
}
check_allocated(SuperblockBitmap::EMPTY);
}
}
#[test]
fn mask_allocs() {
let allocator = Allocator::builder()
.alignment(8)
.block_size(64)
.capacity(3 * 64 * BLOCKS_PER_SUPERBLOCK)
.build();
for idx in 0..allocator.usage_bitmap.len() {
for mask1_start in 0..BLOCKS_PER_SUPERBLOCK {
for mask1_len in 1..=(BLOCKS_PER_SUPERBLOCK - mask1_start.max(1)) {
let mask1 = SuperblockBitmap::new_mask(mask1_start as u32, mask1_len as u32);
assert_eq!(allocator.try_alloc_mask(idx, mask1), Ok(()));
let check_allocated = |curr_bitmap| {
for (idx2, asb) in allocator.usage_bitmap.iter().enumerate() {
let superblock = asb.load(Ordering::Relaxed);
if idx2 == idx {
assert_eq!(superblock, curr_bitmap);
} else {
assert_eq!(superblock, SuperblockBitmap::EMPTY);
}
}
};
check_allocated(mask1);
assert_eq!(allocator.try_alloc_superblock(idx), Err(mask1));
check_allocated(mask1);
for mask2_start in (0..BLOCKS_PER_SUPERBLOCK).step_by(5) {
for mask2_len in 1..=(BLOCKS_PER_SUPERBLOCK - mask2_start.max(1)) {
let mask2 =
SuperblockBitmap::new_mask(mask2_start as u32, mask2_len as u32);
let alloc2_res = allocator.try_alloc_mask(idx, mask2);
if mask1 & mask2 != SuperblockBitmap::EMPTY {
assert_eq!(alloc2_res, Err(mask1));
check_allocated(mask1);
} else {
assert_eq!(alloc2_res, Ok(()));
check_allocated(mask1 + mask2);
unsafe { allocator.dealloc_mask(idx, mask2) };
check_allocated(mask1);
}
}
}
unsafe { allocator.dealloc_mask(idx, mask1) };
check_allocated(SuperblockBitmap::EMPTY);
}
}
}
}
// TODO: Test alloc_unbound
// TODO: Test dealloc_unbound
}
// TODO: Add concurrent tests as well, obviously
// TODO: Benchmark at various block sizes and show a graph on README
//
// TODO: Look at assembly and make sure that power-of-two integer manipulation
// are properly optimized, force inlining of Allocator size queries and
// div_round_up if necessary.
| true
|
77d96c8893040defbf08419dcda885b6b9282db6
|
Rust
|
Norman0406/RustPlayground
|
/chat/server/src/util/response_stream.rs
|
UTF-8
| 1,254
| 2.578125
| 3
|
[] |
no_license
|
use futures::channel::oneshot;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::stream::Stream;
pub struct ResponseStream<T> {
inner: Pin<Box<dyn Stream<Item = T> + Send + Sync>>,
chan: Option<oneshot::Sender<bool>>,
}
impl<T> ResponseStream<T> {
pub fn new<S>(stream: S) -> ResponseStream<T>
where
S: Stream<Item = T> + Send + Sync + 'static,
{
ResponseStream {
inner: Box::pin(stream),
chan: None,
}
}
pub fn new_with_close_notification<S>(
chan: oneshot::Sender<bool>,
stream: S,
) -> ResponseStream<T>
where
S: Stream<Item = T> + Send + Sync + 'static,
{
ResponseStream {
inner: Box::pin(stream),
chan: Some(chan),
}
}
}
impl<T> Stream for ResponseStream<T> {
type Item = T;
fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
self.inner.as_mut().as_mut().poll_next(cx)
}
}
impl<T> Drop for ResponseStream<T> {
fn drop(&mut self) {
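        // If a close-notification channel was provided, tell its receiver
        // that this stream has been dropped.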
let chan = self.chan.take();
if let Some(chan) = chan {
chan.send(true).unwrap();
}
}
}
| true
|
414206d9c0b07fbf701da81bc7bffd63a39afd75
|
Rust
|
ritchie46/lsh-rs
|
/examples/neural-network/src/activations.rs
|
UTF-8
| 784
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
use ndarray::prelude::*;
pub enum Activation {
ReLU,
None,
Sigmoid,
}
impl Activation {
pub fn activate(&self, z: f32) -> f32 {
use Activation::*;
match self {
ReLU => {
if z > 0. {
z
} else {
0.
}
}
None => z,
Sigmoid => 1. / (1. + (-z).exp()),
}
}
pub fn prime(&self, z: f32) -> f32 {
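        // Derivatives of the activations: ReLU' is a unit step, and the
        // sigmoid satisfies sigma'(z) = sigma(z) * (1 - sigma(z)).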
use Activation::*;
match self {
ReLU => {
if z > 0. {
1.
} else {
0.
}
}
None => 1.,
Sigmoid => self.activate(z) * (1. - self.activate(z)),
}
}
}
| true
|
1c158c6057287024f64e445627db5017d824177c
|
Rust
|
ManuelZierl/learn-rust
|
/src/polynoms.rs
|
UTF-8
| 1,484
| 3.65625
| 4
|
[] |
no_license
|
// TASK 3 Roots of Polynomials
pub enum Polynomial<Cof,Exp>
{
Nil,
Cons(Cof, Exp, Box<Polynomial<Cof,Exp>>)
}
impl Polynomial<i32, i32>
{
pub fn new() -> Polynomial<i32,i32> { Polynomial::Nil }
pub fn print(&self)
{
match &self
{
Polynomial::Nil => {println!(" 0 "); },
Polynomial::Cons(c, e, l) => {print!("{}x^{} + ", c, e); l.print(); }
}
}
pub fn add(self, cof: i32, exp: i32) -> Polynomial<i32, i32>
{
Polynomial::Cons(cof, exp, Box::new(self))
}
pub fn evaluate(&self, x:f32) -> f32
{
match &self
{
Polynomial::Nil => {0.0},
Polynomial::Cons(c,e,l) => {(*c as f32)*x.powi(*e) + l.evaluate(x)}
}
}
pub fn derivative(&self) -> Polynomial<i32, i32>
{
match &self
{
Polynomial::Nil => { Polynomial::Nil}
Polynomial::Cons(_,0,l) => {l.derivative()}
Polynomial::Cons(c,e,l) =>
{
l.derivative().add(c*e,e-1)
}
}
}
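	// One Newton-Raphson step: x_{n+1} = x_n - f(x_n) / f'(x_n)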
fn newton_help(&self, x:f32) -> f32
{
		let f_x = self.evaluate(x);
		let f_dx = self.derivative().evaluate(x);
		x - (f_x / f_dx)
}
pub fn newton(&self, mut x:f32) -> f32
{
let mut prec = 10.;
while prec > 0.1
{
let nx = self.newton_help(x);
prec = (x - nx).abs();
x = nx;
}
x
}
}
| true
|
1dcb01de80778246495ce5272be53126f3957583
|
Rust
|
jbham/json-rules-engine-rs
|
/src/event/email_notification.rs
|
UTF-8
| 2,756
| 2.6875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use crate::{event::EventTrait, Error};
use async_trait::async_trait;
use erased_serde::Serialize;
use futures_util::TryFutureExt;
use sendgrid::v3::{
Content, Email as SendGridEmail, Message, Personalization, Sender,
};
use serde_json::Value;
use std::collections::HashMap;
#[derive(Debug, Clone)]
pub struct EmailNotification {
ty: String,
}
#[async_trait]
impl EventTrait for EmailNotification {
fn new() -> Self {
Self {
ty: "email_notification".to_string(),
}
}
fn get_type(&self) -> &str {
&self.ty
}
fn validate(&self, params: &HashMap<String, Value>) -> Result<(), String> {
if !(params.contains_key("to")
&& params.contains_key("from")
&& params.contains_key("title")
&& params.contains_key("message"))
{
return Err(
"At least one of 'to', 'from', 'title', 'message' is missing."
.to_string(),
);
}
Ok(())
}
async fn trigger(
&mut self,
params: &HashMap<String, Value>,
facts: &(dyn Serialize + Sync),
) -> Result<(), Error> {
let api_key = ::std::env::var("SENDGRID_API_KEY")
.expect("You must set 'SENDGRID_API_KEY' environment variable");
let sender = Sender::new(api_key);
let tos = params.get("to").unwrap().as_array().unwrap();
let from = params.get("from").unwrap().to_string();
let mut title = params.get("title").unwrap().to_string();
let mut message = params.get("message").unwrap().to_string();
let value = serde_json::from_str::<Value>(
&serde_json::to_string(facts).unwrap(),
)
.unwrap();
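        // Render the message and title as Mustache templates against the
        // serialized facts; if compilation or rendering fails, keep the raw strings.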
if let Ok(tmpl) = mustache::compile_str(&message)
.and_then(|template| template.render_to_string(&value))
{
message = tmpl;
}
if let Ok(tmpl) = mustache::compile_str(&title)
.and_then(|template| template.render_to_string(&value))
{
title = tmpl;
}
let personalization = {
let mut p =
Personalization::new(SendGridEmail::new(&tos[0].to_string()));
for to in tos.iter().skip(1) {
p = p.add_to(SendGridEmail::new(to.to_string()));
}
p
};
let m = Message::new(SendGridEmail::new(from))
.set_subject(&title)
.add_content(
Content::new()
.set_content_type("text/plain")
.set_value(message),
)
.add_personalization(personalization);
sender.send(&m).map_err(Error::from).await?;
Ok(())
}
}
| true
|
a1bb46071c02a0dd96c388243390c23542aa77a0
|
Rust
|
positively-charged/leetcode
|
/solutions/final-prices-with-a-special-discount-in-a-shop/solution.rs
|
UTF-8
| 624
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
struct Solution;
fn main() {
let r = Solution::final_prices( vec![8,4,6,2,3] );
println!( "{:?}", r );
let r = Solution::final_prices( vec![1,2,3,4,5] );
println!( "{:?}", r );
let r = Solution::final_prices( vec![10,1,1,6] );
println!( "{:?}", r );
}
impl Solution {
pub fn final_prices(mut prices: Vec<i32>) -> Vec<i32> {
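      // Brute force O(n^2): for each price, subtract the first later price
      // that is less than or equal to it (the "special discount").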
for i in 0 .. prices.len() {
for j in i + 1 .. prices.len() {
if prices[ j ] <= prices[ i ] {
prices[ i ] -= prices[ j ];
break;
}
}
}
prices
}
}
| true
|
5e7eae8d30192feb30eb7c5b9bc24f66902bc6db
|
Rust
|
auno/adventofcode-2020
|
/05a/src/main.rs
|
UTF-8
| 498
| 3.140625
| 3
|
[] |
no_license
|
use std::io::{self, BufRead};
use std::cmp::max;
fn main() {
let lines: Vec<String> = io::stdin().lock().lines()
.map(|line| line.unwrap())
.collect();
let mut max_seat: u32 = 0;
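    // Boarding passes encode the seat in binary: F/L are 0 bits and B/R are 1 bits,
    // so the seat ID is simply the line parsed as a base-2 number.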
for line in lines {
let line = line
.replace(|c| c == 'F' || c == 'L', "0")
.replace(|c| c == 'B' || c == 'R', "1");
let seat = u32::from_str_radix(&line, 2).unwrap();
max_seat = max(max_seat, seat);
}
println!("{}", max_seat);
}
| true
|
50de17b322d0777019ad6935fd361257ab5558c1
|
Rust
|
StarOrpheus/pascal-enty
|
/assets/format-keywords/src/main.rs
|
UTF-8
| 780
| 3.1875
| 3
|
[] |
no_license
|
use std::fs::File;
use std::io::{BufReader, BufRead};
fn main() {
let file = File::open("keywords.list").unwrap();
    let mut token_list = Vec::new();
for line in BufReader::new(file).lines() {
let line = line.unwrap();
if line.is_empty() {
continue;
}
        let upper_line = line.trim().to_uppercase();
        for c in upper_line.chars() {
            print!("${} ", c);
        }
        print!("{}", " ".repeat(48 - 3 * upper_line.len()));
        let token_name = format!("Token{}", upper_line);
        print!("{{ {}{} }}", "\\_ -> ", token_name);
        token_list.push(token_name);
println!();
}
println!("Token list:\n");
    for token in token_list {
print!("{} | ", token);
}
println!();
}
| true
|
9f72208eb46d6c13f87cfbc8dad274c3b8357891
|
Rust
|
btwotwo/chip8rust
|
/src/registers.rs
|
UTF-8
| 1,646
| 3.578125
| 4
|
[] |
no_license
|
use std::ops::{Index, IndexMut};
#[derive(Debug)]
pub struct Registers {
regs: [u8; 16],
}
pub enum Position {
///Second 4 bits (0X00)
X,
///Third 4 bits (00Y0)
Y,
}
impl Registers {
pub fn new() -> Registers {
Registers { regs: [0; 16] }
}
pub fn add_immediate(&mut self, index: u8, value: u8) {
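        // CHIP-8's ADD-immediate (7XNN) wraps on overflow and does not touch the carry flag.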
let (result, _) = self[index].overflowing_add(value);
self[index] = result;
}
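    /// Extract the register index encoded at `position` in an opcode.
    ///
    /// Illustrative example (hypothetical opcode): for `0x8AB4`,
    /// `Position::X` yields `0xA` and `Position::Y` yields `0xB`.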
pub fn get_index(opcode: u16, position: Position) -> u8 {
match position {
Position::X => ((opcode & 0x0F00) >> 8) as u8,
Position::Y => ((opcode & 0x00F0) >> 4) as u8,
}
}
pub fn set_carry(&mut self, carried: bool) {
if carried {
self[0xF] = 1;
} else {
self[0xF] = 0;
}
}
}
impl Index<u8> for Registers {
type Output = u8;
fn index(&self, index: u8) -> &Self::Output {
&self.regs[index as usize]
}
}
impl Index<(u16, Position)> for Registers {
type Output = u8;
fn index(&self, index: (u16, Position)) -> &Self::Output {
let index = Registers::get_index(index.0, index.1);
&self.regs[index as usize]
}
}
impl IndexMut<(u16, Position)> for Registers {
fn index_mut(&mut self, index: (u16, Position)) -> &mut Self::Output {
let index = Registers::get_index(index.0, index.1);
&mut self.regs[index as usize]
}
}
impl IndexMut<u8> for Registers {
fn index_mut(&mut self, index: u8) -> &mut Self::Output {
&mut self.regs[index as usize]
}
}
| true
|
37d7ebaae2dd7bbb97403c847d95624035482dc4
|
Rust
|
BafDyce/adventofcode
|
/2017/rust/day14/src/part2.rs
|
UTF-8
| 1,803
| 3.1875
| 3
|
[
"Unlicense"
] |
permissive
|
use std::collections::VecDeque;
pub fn solve(hashes: &Vec<String>) -> i32 {
let mut grid: Vec<bool> = Vec::new();
// fill grid
for hash in hashes {
for ch in hash.chars() {
let bin = if ch.is_digit(10) {
ch as u8 - '0' as u8
} else if ch.is_digit(16) {
ch as u8 - 'a' as u8 + 10
} else {
panic!("Invalid hex char: {}", ch);
};
let binary = format!("{:4b}", bin);
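            // "{:4b}" pads to width 4 with spaces, so a leading ' ' below stands for a 0 bit.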
for ch in binary.chars() {
let used = match ch {
'0' | ' ' => false,
'1' => true,
_ => panic!("Invalid binary char: {}", ch),
};
grid.push(used);
}
}
}
assert_eq!(grid.len(), 128*128);
// count groups
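    // Flood fill: every still-set cell starts a new group, and a queue of
    // 4-connected neighbours is drained, clearing each reachable used cell.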
let mut groups = 0;
for idx in 0..grid.len() {
if ! grid[idx] {
continue;
}
groups += 1;
grid[idx] = false;
let mut same_group = get_neighbor_list(idx);
while same_group.len() > 0 {
if let Some(nidx) = same_group.pop_front() {
if grid[nidx] {
grid[nidx] = false;
same_group.append( &mut get_neighbor_list(nidx) );
}
}
}
}
groups
}
fn get_neighbor_list(idx: usize) -> VecDeque<usize> {
let mut neighbors = VecDeque::new();
// left
if idx % 128 > 0 {
neighbors.push_back(idx - 1);
}
// right
if idx % 128 != 127 {
neighbors.push_back(idx + 1);
}
// up
if idx > 127 {
neighbors.push_back(idx - 128);
}
// down
if idx / 128 < 127 {
neighbors.push_back(idx + 128);
}
neighbors
}
| true
|
ac8c6948257e67e324ce7ec9e95f2635cd88888d
|
Rust
|
rust-bitcoin/rust-bitcoin
|
/fuzz/fuzz_targets/bitcoin/outpoint_string.rs
|
UTF-8
| 2,030
| 2.953125
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
use std::str::FromStr;
use bitcoin::blockdata::transaction::OutPoint;
use bitcoin::consensus::encode;
use honggfuzz::fuzz;
fn do_test(data: &[u8]) {
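    // Normalise upper-case hex digits to lower-case so the round-trip string
    // comparison below is case-insensitive.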
let lowercase: Vec<u8> = data
.iter()
.map(|c| match *c {
b'A' => b'a',
b'B' => b'b',
b'C' => b'c',
b'D' => b'd',
b'E' => b'e',
b'F' => b'f',
x => x,
})
.collect();
let data_str = match String::from_utf8(lowercase) {
Err(_) => return,
Ok(s) => s,
};
match OutPoint::from_str(&data_str) {
Ok(op) => {
assert_eq!(op.to_string().as_bytes(), data_str.as_bytes());
}
Err(_) => {
// If we can't deserialize as a string, try consensus deserializing
let res: Result<OutPoint, _> = encode::deserialize(data);
if let Ok(deser) = res {
let ser = encode::serialize(&deser);
assert_eq!(ser, data);
let string = deser.to_string();
match OutPoint::from_str(&string) {
Ok(destring) => assert_eq!(destring, deser),
Err(_) => panic!(),
}
}
}
}
}
fn main() {
loop {
fuzz!(|data| {
do_test(data);
});
}
}
#[cfg(all(test, fuzzing))]
mod tests {
fn extend_vec_from_hex(hex: &str, out: &mut Vec<u8>) {
let mut b = 0;
for (idx, c) in hex.as_bytes().iter().enumerate() {
b <<= 4;
match *c {
b'A'..=b'F' => b |= c - b'A' + 10,
b'a'..=b'f' => b |= c - b'a' + 10,
b'0'..=b'9' => b |= c - b'0',
_ => panic!("Bad hex"),
}
if (idx & 1) == 1 {
out.push(b);
b = 0;
}
}
}
#[test]
fn duplicate_crash() {
let mut a = Vec::new();
extend_vec_from_hex("00", &mut a);
super::do_test(&a);
}
}
| true
|
e3fc064eb55060c208b94cd94ce55843ef4941a5
|
Rust
|
AndyKovv/fjam
|
/src/user/email_manager.rs
|
UTF-8
| 4,372
| 3.171875
| 3
|
[] |
no_license
|
//! This module is the email manager.
//! All email-related logic can be added here.
//! If you need to add a new implementation or a new trait, add it here.
use diesel::prelude::*;
use diesel::dsl::*;
use schema::users::dsl::*;
use schema::users;
use fjam_derive::integration_test;
use postgres_connection::pool;
use regex::Regex;
use user::errors::{EmailValidationError, EmailExistError};
use common::common_api_errors::{BasicApiError};
lazy_static! {
// Perform a simple regex match to validate email
static ref EMAIL_REGEX: regex::Regex = Regex::new("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$").unwrap();
}
#[derive(Debug)]
pub struct EmailManager {
    /* Email manager struct, used to validate an email and to check whether it exists in the database */
pub email: String,
pub is_valid: bool,
pub exist: bool
}
impl EmailManager {
/*
        The email manager is used to check whether an email exists
        or to validate an email with a regex
*/
pub fn new(raw_email: &str) -> Self {
Self{
email: raw_email.to_string(),
..Default::default()
}
}
pub fn valid(&mut self) -> Result<bool, Box<BasicApiError>> {
        /* Method should validate the incoming email via regex */
match EMAIL_REGEX.is_match(&self.email) {
true => {
self.set_email_is_valid();
Ok(true)
},
false => Err(BasicApiError::new(
EmailValidationError::validation_error())
)
}
}
pub fn exist(&mut self) -> Result<bool, Box<BasicApiError>> {
        /* Method should check whether the email exists in the database */
self.valid()?;
let conn = pool().get().unwrap();
let email_exist = users::table
.filter(users::email.eq(self.email.to_lowercase()))
.execute(&*conn);
match email_exist {
Ok(0) => Ok(false),
_ => {
self.set_email_exist();
Err(BasicApiError::new(
EmailExistError::email_exist())
)
},
}
}
fn set_email_exist(&mut self) {
        /* Method should mark the email as existing */
self.exist = true;
}
fn set_email_is_valid(&mut self) {
        /* Method should mark the email as valid */
self.is_valid = true
}
}
impl Default for EmailManager {
fn default() -> Self {
Self {
email: "".to_string(),
is_valid: false,
exist: false
}
}
}
#[cfg(test)]
pub mod mail_manager_tests {
use super::*;
/*
        Maybe we need to implement a state holder with constants for tests?
        I mean, when we create something we need to override some default values,
        such as user_name, email, etc.
        Need to think about this!
*/
fn set_up() {
let conn = pool().get().unwrap();
let _result = insert_into(users)
.values((users::email.eq("andy.kovv@gmail.com"), users::password.eq("my_shiney_password")))
.returning(id)
            .get_results::<i32>(&*conn);
}
fn tear_down() {
let conn = pool().get().unwrap();
conn.execute("DELETE FROM users WHERE id > 0").unwrap();
}
#[test]
#[integration_test]
fn test_should_check_email_exist_in_database() {
let user_email = "andy.kovalev@email.com";
let mut email_manager = EmailManager::new(user_email);
assert_eq!(email_manager.exist().unwrap(), false);
        let user_email_exist = "andy.kovv@gmail.com";
        let mut email_manager = EmailManager::new(user_email_exist);
assert_eq!(email_manager.exist().is_err(), true);
}
#[test]
#[integration_test]
fn test_should_validate_income_email() {
let invalid_email = "email@-example.com";
let mut email_manager = EmailManager::new(invalid_email);
assert_eq!(email_manager.valid().is_err(), true);
let valid_email = "andy.kovv@gmail.com";
let mut email_manager = EmailManager::new(valid_email);
assert_eq!(email_manager.valid().unwrap(), true);
}
#[test]
#[integration_test]
fn test_should_check_status_in_structure() {
let user_email = "som@mail.com";
let mut manager = EmailManager::new(user_email);
        let _ = manager.exist();
assert_eq!(manager.is_valid, true);
assert_eq!(manager.exist, false);
}
}
| true
|
f871d9443273c5308b95dc0d0ed6fbd339c1830f
|
Rust
|
microsoft/DiskANN
|
/rust/diskann/src/utils/kmeans.rs
|
UTF-8
| 13,754
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
#![warn(missing_debug_implementations, missing_docs)]
//! k-means clustering utilities
use rand::{distributions::Uniform, prelude::Distribution, thread_rng};
use rayon::prelude::*;
use std::cmp::min;
use crate::common::ANNResult;
use crate::utils::math_util::{calc_distance, compute_closest_centers, compute_vecs_l2sq};
/// Run Lloyds one iteration
/// Given data in row-major num_points * dim, and centers in row-major
/// num_centers * dim and squared lengths of data points, output the closest
/// center to each data point, update centers, and also return inverted index.
/// If closest_centers == NULL, will allocate memory and return.
/// Similarly, if closest_docs == NULL, will allocate memory and return.
#[allow(clippy::too_many_arguments)]
fn lloyds_iter(
data: &[f32],
num_points: usize,
dim: usize,
centers: &mut [f32],
num_centers: usize,
docs_l2sq: &[f32],
mut closest_docs: &mut Vec<Vec<usize>>,
closest_center: &mut [u32],
) -> ANNResult<f32> {
let compute_residual = true;
closest_docs.iter_mut().for_each(|doc| doc.clear());
compute_closest_centers(
data,
num_points,
dim,
centers,
num_centers,
1,
closest_center,
Some(&mut closest_docs),
Some(docs_l2sq),
)?;
centers.fill(0.0);
centers
.par_chunks_mut(dim)
.enumerate()
.for_each(|(c, center)| {
let mut cluster_sum = vec![0.0; dim];
for &doc_index in &closest_docs[c] {
let current = &data[doc_index * dim..(doc_index + 1) * dim];
for (j, current_val) in current.iter().enumerate() {
cluster_sum[j] += *current_val as f64;
}
}
if !closest_docs[c].is_empty() {
for (i, sum_val) in cluster_sum.iter().enumerate() {
center[i] = (*sum_val / closest_docs[c].len() as f64) as f32;
}
}
});
let mut residual = 0.0;
if compute_residual {
let buf_pad: usize = 32;
let chunk_size: usize = 2 * 8192;
let nchunks =
num_points / chunk_size + (if num_points % chunk_size == 0 { 0 } else { 1 } as usize);
let mut residuals: Vec<f32> = vec![0.0; nchunks * buf_pad];
residuals
.par_iter_mut()
.enumerate()
.for_each(|(chunk, res)| {
for d in (chunk * chunk_size)..min(num_points, (chunk + 1) * chunk_size) {
*res += calc_distance(
&data[d * dim..(d + 1) * dim],
¢ers[closest_center[d] as usize * dim..],
dim,
);
}
});
for chunk in 0..nchunks {
residual += residuals[chunk * buf_pad];
}
}
Ok(residual)
}
/// Run Lloyds until max_reps or stopping criterion
/// If you pass NULL for closest_docs and closest_center, it will NOT return
/// the results, else it will assume appropriate allocation as closest_docs =
/// new vec<usize> [num_centers], and closest_center = new size_t[num_points]
/// Final centers are output in centers as row-major num_centers * dim.
fn run_lloyds(
data: &[f32],
num_points: usize,
dim: usize,
centers: &mut [f32],
num_centers: usize,
max_reps: usize,
) -> ANNResult<(Vec<Vec<usize>>, Vec<u32>, f32)> {
let mut residual = f32::MAX;
let mut closest_docs = vec![Vec::new(); num_centers];
let mut closest_center = vec![0; num_points];
let mut docs_l2sq = vec![0.0; num_points];
compute_vecs_l2sq(&mut docs_l2sq, data, num_points, dim);
let mut old_residual;
for i in 0..max_reps {
old_residual = residual;
residual = lloyds_iter(
data,
num_points,
dim,
centers,
num_centers,
&docs_l2sq,
&mut closest_docs,
&mut closest_center,
)?;
if (i != 0 && (old_residual - residual) / residual < 0.00001) || (residual < f32::EPSILON) {
println!(
"Residuals unchanged: {} becomes {}. Early termination.",
old_residual, residual
);
break;
}
}
Ok((closest_docs, closest_center, residual))
}
/// Assume memory allocated for pivot_data as new float[num_centers * dim]
/// and select randomly num_centers points as pivots
fn selecting_pivots(
data: &[f32],
num_points: usize,
dim: usize,
pivot_data: &mut [f32],
num_centers: usize,
) {
let mut picked = Vec::new();
let mut rng = thread_rng();
let distribution = Uniform::from(0..num_points);
for j in 0..num_centers {
let mut tmp_pivot = distribution.sample(&mut rng);
while picked.contains(&tmp_pivot) {
tmp_pivot = distribution.sample(&mut rng);
}
picked.push(tmp_pivot);
let data_offset = tmp_pivot * dim;
let pivot_offset = j * dim;
pivot_data[pivot_offset..pivot_offset + dim]
.copy_from_slice(&data[data_offset..data_offset + dim]);
}
}
/// Select pivots in k-means++ algorithm
/// Points that are farther away from the already chosen centroids
/// have a higher probability of being selected as the next centroid.
/// The k-means++ algorithm helps avoid poor initial centroid
/// placement that can result in suboptimal clustering.
fn k_meanspp_selecting_pivots(
data: &[f32],
num_points: usize,
dim: usize,
pivot_data: &mut [f32],
num_centers: usize,
) {
if num_points > (1 << 23) {
println!("ERROR: n_pts {} currently not supported for k-means++, maximum is 8388608. Falling back to random pivot selection.", num_points);
selecting_pivots(data, num_points, dim, pivot_data, num_centers);
return;
}
let mut picked: Vec<usize> = Vec::new();
let mut rng = thread_rng();
let real_distribution = Uniform::from(0.0..1.0);
let int_distribution = Uniform::from(0..num_points);
let init_id = int_distribution.sample(&mut rng);
let mut num_picked = 1;
picked.push(init_id);
let init_data_offset = init_id * dim;
pivot_data[0..dim].copy_from_slice(&data[init_data_offset..init_data_offset + dim]);
let mut dist = vec![0.0; num_points];
dist.par_iter_mut().enumerate().for_each(|(i, dist_i)| {
*dist_i = calc_distance(
&data[i * dim..(i + 1) * dim],
&data[init_id * dim..(init_id + 1) * dim],
dim,
);
});
let mut dart_val: f64;
let mut tmp_pivot = 0;
let mut sum_flag = false;
while num_picked < num_centers {
dart_val = real_distribution.sample(&mut rng);
let mut sum: f64 = 0.0;
for item in dist.iter().take(num_points) {
sum += *item as f64;
}
if sum == 0.0 {
sum_flag = true;
}
dart_val *= sum;
let mut prefix_sum: f64 = 0.0;
for (i, pivot) in dist.iter().enumerate().take(num_points) {
tmp_pivot = i;
if dart_val >= prefix_sum && dart_val < (prefix_sum + *pivot as f64) {
break;
}
prefix_sum += *pivot as f64;
}
if picked.contains(&tmp_pivot) && !sum_flag {
continue;
}
picked.push(tmp_pivot);
let pivot_offset = num_picked * dim;
let data_offset = tmp_pivot * dim;
pivot_data[pivot_offset..pivot_offset + dim]
.copy_from_slice(&data[data_offset..data_offset + dim]);
dist.par_iter_mut().enumerate().for_each(|(i, dist_i)| {
*dist_i = (*dist_i).min(calc_distance(
&data[i * dim..(i + 1) * dim],
&data[tmp_pivot * dim..(tmp_pivot + 1) * dim],
dim,
));
});
num_picked += 1;
}
}
/// k-means algorithm interface
pub fn k_means_clustering(
data: &[f32],
num_points: usize,
dim: usize,
centers: &mut [f32],
num_centers: usize,
max_reps: usize,
) -> ANNResult<(Vec<Vec<usize>>, Vec<u32>, f32)> {
k_meanspp_selecting_pivots(data, num_points, dim, centers, num_centers);
let (closest_docs, closest_center, residual) =
run_lloyds(data, num_points, dim, centers, num_centers, max_reps)?;
Ok((closest_docs, closest_center, residual))
}
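// Illustrative usage sketch (editor's addition, not part of the original DiskANN
// source): shows how the public `k_means_clustering` interface above might be
// driven on a tiny, hypothetical row-major dataset. The point values and the
// cluster count below are made up purely for illustration.
#[cfg(test)]
mod k_means_usage_sketch {
    use super::*;

    #[test]
    fn clusters_two_well_separated_groups() {
        let dim = 2;
        let num_points = 6;
        let num_centers = 2;
        // Three points near the origin and three near (10, 10), row-major.
        let data: Vec<f32> = vec![
            0.0, 0.0, 1.0, 0.0, 0.0, 1.0, //
            10.0, 10.0, 11.0, 10.0, 10.0, 11.0,
        ];
        // `centers` is overwritten by pivot selection and Lloyd's iterations.
        let mut centers = vec![0.0f32; num_centers * dim];
        let (closest_docs, closest_center, residual) =
            k_means_clustering(&data, num_points, dim, &mut centers, num_centers, 10).unwrap();
        // One inverted list per center, one assignment per point.
        assert_eq!(closest_docs.len(), num_centers);
        assert_eq!(closest_center.len(), num_points);
        assert!(residual >= 0.0);
    }
}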
#[cfg(test)]
mod kmeans_test {
use super::*;
use approx::assert_relative_eq;
use rand::Rng;
#[test]
fn lloyds_iter_test() {
let dim = 2;
let num_points = 10;
let num_centers = 3;
let data: Vec<f32> = (1..=num_points * dim).map(|x| x as f32).collect();
let mut centers = [1.0, 2.0, 7.0, 8.0, 19.0, 20.0];
let mut closest_docs: Vec<Vec<usize>> = vec![vec![]; num_centers];
let mut closest_center: Vec<u32> = vec![0; num_points];
let docs_l2sq: Vec<f32> = data
.chunks(dim)
.map(|chunk| chunk.iter().map(|val| val.powi(2)).sum())
.collect();
let residual = lloyds_iter(
&data,
num_points,
dim,
&mut centers,
num_centers,
&docs_l2sq,
&mut closest_docs,
&mut closest_center,
)
.unwrap();
let expected_centers: [f32; 6] = [2.0, 3.0, 9.0, 10.0, 17.0, 18.0];
let expected_closest_docs: Vec<Vec<usize>> =
vec![vec![0, 1], vec![2, 3, 4, 5, 6], vec![7, 8, 9]];
let expected_closest_center: [u32; 10] = [0, 0, 1, 1, 1, 1, 1, 2, 2, 2];
let expected_residual: f32 = 100.0;
// sort data for assert
centers.sort_by(|a, b| a.partial_cmp(b).unwrap());
for inner_vec in &mut closest_docs {
inner_vec.sort();
}
closest_center.sort_by(|a, b| a.partial_cmp(b).unwrap());
assert_eq!(centers, expected_centers);
assert_eq!(closest_docs, expected_closest_docs);
assert_eq!(closest_center, expected_closest_center);
assert_relative_eq!(residual, expected_residual, epsilon = 1.0e-6_f32);
}
#[test]
fn run_lloyds_test() {
let dim = 2;
let num_points = 10;
let num_centers = 3;
let max_reps = 5;
let data: Vec<f32> = (1..=num_points * dim).map(|x| x as f32).collect();
let mut centers = [1.0, 2.0, 7.0, 8.0, 19.0, 20.0];
let (mut closest_docs, mut closest_center, residual) =
run_lloyds(&data, num_points, dim, &mut centers, num_centers, max_reps).unwrap();
let expected_centers: [f32; 6] = [3.0, 4.0, 10.0, 11.0, 17.0, 18.0];
let expected_closest_docs: Vec<Vec<usize>> =
vec![vec![0, 1, 2], vec![3, 4, 5, 6], vec![7, 8, 9]];
let expected_closest_center: [u32; 10] = [0, 0, 0, 1, 1, 1, 1, 2, 2, 2];
let expected_residual: f32 = 72.0;
// sort data for assert
centers.sort_by(|a, b| a.partial_cmp(b).unwrap());
for inner_vec in &mut closest_docs {
inner_vec.sort();
}
closest_center.sort_by(|a, b| a.partial_cmp(b).unwrap());
assert_eq!(centers, expected_centers);
assert_eq!(closest_docs, expected_closest_docs);
assert_eq!(closest_center, expected_closest_center);
assert_relative_eq!(residual, expected_residual, epsilon = 1.0e-6_f32);
}
#[test]
fn selecting_pivots_test() {
let dim = 2;
let num_points = 10;
let num_centers = 3;
// Generate some random data points
let mut rng = rand::thread_rng();
let data: Vec<f32> = (0..num_points * dim).map(|_| rng.gen()).collect();
let mut pivot_data = vec![0.0; num_centers * dim];
selecting_pivots(&data, num_points, dim, &mut pivot_data, num_centers);
// Verify that each pivot point corresponds to a point in the data
for i in 0..num_centers {
let pivot_offset = i * dim;
let pivot = &pivot_data[pivot_offset..(pivot_offset + dim)];
// Make sure the pivot is found in the data
let mut found = false;
for j in 0..num_points {
let data_offset = j * dim;
let point = &data[data_offset..(data_offset + dim)];
if pivot == point {
found = true;
break;
}
}
assert!(found, "Pivot not found in data");
}
}
#[test]
fn k_meanspp_selecting_pivots_test() {
let dim = 2;
let num_points = 10;
let num_centers = 3;
// Generate some random data points
let mut rng = rand::thread_rng();
let data: Vec<f32> = (0..num_points * dim).map(|_| rng.gen()).collect();
let mut pivot_data = vec![0.0; num_centers * dim];
k_meanspp_selecting_pivots(&data, num_points, dim, &mut pivot_data, num_centers);
// Verify that each pivot point corresponds to a point in the data
for i in 0..num_centers {
let pivot_offset = i * dim;
let pivot = &pivot_data[pivot_offset..pivot_offset + dim];
// Make sure the pivot is found in the data
let mut found = false;
for j in 0..num_points {
let data_offset = j * dim;
let point = &data[data_offset..data_offset + dim];
if pivot == point {
found = true;
break;
}
}
assert!(found, "Pivot not found in data");
}
}
}
| true
|
f531b089a40d8b4841d4625cbb11ed732f91e585
|
Rust
|
kometen/actix-web-hello-world
|
/src/test.rs
|
UTF-8
| 1,229
| 2.609375
| 3
|
[] |
no_license
|
use super::*;
use actix_web::{http, test, web};
use web::Bytes;
#[actix_rt::test]
async fn test_index_ok() {
let req = test::TestRequest::with_header("content-type", "text/plain")
.to_http_request();
let resp = _index(req).await;
assert_eq!(resp.status(), http::StatusCode::OK);
}
#[actix_rt::test]
async fn test_hello_ok() {
let mut app = test::init_service(
App::new().service(hello)
).await;
let req = test::TestRequest::get().uri("/").to_request();
let resp = test::call_service(&mut app, req).await;
let result = test::read_body(resp).await;
assert_eq!(result, Bytes::from_static(b"Hej med dig!"));
}
#[actix_rt::test]
async fn test_user_ok() {
let mut app = test::init_service(
App::new().service(identify)
).await;
let payload = r#"{"username":"Claus","user_id":1967}"#.as_bytes();
let resp = test::TestRequest::post()
.uri("/identify")
.header(header::CONTENT_TYPE, "application/json")
.set_payload(payload)
.send_request(&mut app)
.await;
let result = test::read_body(resp).await;
assert_eq!(result, "Welcome Claus, your id is 1967");
}
#[test]
fn my_test() {
assert_eq!(2, 1 + 1);
}
| true
|
9bc11d30b5d9b4590fe583bf1c1bfdb83484ff07
|
Rust
|
stbuehler/rust-bitstring
|
/src/utils/bigendian.rs
|
UTF-8
| 10,982
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
use std::{
cmp::min,
mem::size_of,
};
/// Generic helper methods to treat [u*]-slices as big endian bit
/// strings.
pub trait BigEndianBitString: Sized {
/// bits in a single element
fn elembits() -> usize {
8 * size_of::<Self>()
}
/// integer with single bit set. bit 0 is the highest bit (big
/// endian). Wraps at `Self::elembits()`.
fn mask(ndx: usize) -> Self;
/// increment from right; don't touch first `prefix` bits; returns
/// true on overflow
///
/// # Panics
///
/// Panics if `prefix > Self::elembits() * slice.len()`.
fn inc(slice: &mut [Self], prefix: usize) -> bool;
/// Get the `ndx`th bit.
///
/// # Panics
///
/// Panics if `ndx >= Self::elembits() * slice.len()`.
fn get(slice: &[Self], ndx: usize) -> bool;
/// Set the `ndx`th bit to `bit`.
///
/// # Panics
///
/// Panics if `ndx >= Self::elembits() * slice.len()`.
fn set(slice: &mut [Self], ndx: usize, bit: bool) {
if bit {
Self::on(slice, ndx)
} else {
Self::off(slice, ndx)
}
}
/// Set the `ndx`th bit to `true`.
///
/// # Panics
///
/// Panics if `ndx >= Self::elembits() * slice.len()`.
fn on(slice: &mut [Self], ndx: usize);
/// Set the `ndx`th bit to `false`.
///
/// # Panics
///
/// Panics if `ndx >= Self::elembits() * slice.len()`.
fn off(slice: &mut [Self], ndx: usize);
/// Flips the `ndx`th bit.
///
/// # Panics
///
/// Panics if `ndx >= Self::elembits() * slice.len()`.
fn flip(slice: &mut [Self], ndx: usize);
/// Length of the longest shared prefix of two bit strings.
fn shared_prefix_len(slice: &[Self], other: &[Self], max_len: usize) -> usize;
/// Set all bits from [ndx..] to `false`.
///
/// Doesn't do anything if `ndx >= Self::elembits() * slice.len()`.
fn set_false_from(slice: &mut [Self], ndx: usize);
/// Whether all bits from [ndx..] are `false`.
///
/// Returns `true` if `ndx >= Self::elembits() * slice.len()`.
fn is_false_from(slice: &[Self], ndx: usize) -> bool;
/// Set all bits from [ndx..] to `true`.
///
/// Doesn't do anything if `ndx >= Self::elembits() * slice.len()`.
fn set_true_from(slice: &mut [Self], ndx: usize);
/// Whether all bits from [ndx..] are `true`.
///
/// Returns `true` if `ndx >= Self::elembits() * slice.len()`.
fn is_true_from(slice: &[Self], ndx: usize) -> bool;
/// check whether another bit string `other` shares the first
/// `prefix` bits with `self`
fn contains(slice: &[Self], prefix: usize, other: &[Self]) -> bool;
}
macro_rules! impl_big_endian_for {
($t:ty) => {
impl BigEndianBitString for $t {
fn mask(ndx: usize) -> Self {
let bits = Self::elembits();
let bit_ndx = bits - 1 - (ndx % bits);
1 << bit_ndx
}
fn inc(slice: &mut [Self], prefix: usize) -> bool {
assert!(prefix <= Self::elembits() * slice.len());
// first element that might be touched by overflow
let slice_ndx = prefix / Self::elembits();
if 0 == prefix % Self::elembits() {
// just don't overflow into elems before slice_ndx
let mut overflow = true;
for i in (slice_ndx..slice.len()).rev() {
let (r, o) = slice[i].overflowing_add(1);
slice[i] = r;
overflow = o;
if !overflow {
break;
}
}
overflow
} else {
// on overflow set all bits to false from "prefix"th bit
let last_prefix_bit_mask = Self::mask(prefix - 1);
let overflow_mask = last_prefix_bit_mask - 1;
let overflow_elem = slice[slice_ndx] & !overflow_mask;
let mut overflow = true;
for i in (slice_ndx..slice.len()).rev() {
let (r, o) = slice[i].overflowing_add(1);
slice[i] = r;
overflow = o;
if !overflow {
break;
}
}
// touched last bit in prefix? -> overflow
if overflow_elem & last_prefix_bit_mask
!= slice[slice_ndx] & last_prefix_bit_mask
{
// restore bits at slice_ndx which belong to
// prefix. as an overflow just happened, the
// remaining bits must be 0.
slice[slice_ndx] = overflow_elem;
true
} else {
assert!(!overflow, "can't overflow without touching prefix");
false
}
}
}
fn get(slice: &[Self], ndx: usize) -> bool {
let mask = Self::mask(ndx);
let slice_ndx = ndx / Self::elembits();
0 != (slice[slice_ndx] & mask)
}
fn on(slice: &mut [Self], ndx: usize) {
let mask = Self::mask(ndx);
let slice_ndx = ndx / Self::elembits();
slice[slice_ndx] |= mask;
}
fn off(slice: &mut [Self], ndx: usize) {
let mask = Self::mask(ndx);
let slice_ndx = ndx / Self::elembits();
slice[slice_ndx] &= !mask;
}
fn flip(slice: &mut [Self], ndx: usize) {
let mask = Self::mask(ndx);
let slice_ndx = ndx / Self::elembits();
slice[slice_ndx] ^= mask;
}
fn shared_prefix_len(slice: &[Self], other: &[Self], max_len: usize) -> usize {
if 0 == max_len {
return 0;
}
// slice index of last bit to compare
let slice_ndx = (max_len - 1) / Self::elembits();
for i in 0..slice_ndx {
let diff = slice[i] ^ other[i];
if 0 != diff {
return i * Self::elembits() + diff.leading_zeros() as usize;
}
}
let diff = slice[slice_ndx] ^ other[slice_ndx];
if 0 != diff {
min(
max_len,
slice_ndx * Self::elembits() + diff.leading_zeros() as usize,
)
} else {
max_len
}
}
fn set_false_from(slice: &mut [Self], ndx: usize) {
let slice_ndx = ndx / Self::elembits();
if 0 == ndx % Self::elembits() {
for i in slice_ndx..slice.len() {
slice[i] = 0;
}
} else if slice_ndx < slice.len() {
let mask = Self::mask(ndx - 1) - 1;
slice[slice_ndx] &= !mask;
for i in slice_ndx + 1..slice.len() {
slice[i] = 0;
}
}
}
fn is_false_from(slice: &[Self], ndx: usize) -> bool {
let slice_ndx = ndx / Self::elembits();
if 0 == ndx % Self::elembits() {
for i in slice_ndx..slice.len() {
if 0 != slice[i] {
return false;
}
}
} else if slice_ndx < slice.len() {
let mask = Self::mask(ndx - 1) - 1;
if 0 != slice[slice_ndx] & mask {
return false;
}
for i in slice_ndx + 1..slice.len() {
if 0 != slice[i] {
return false;
}
}
}
true
}
fn set_true_from(slice: &mut [Self], ndx: usize) {
let slice_ndx = ndx / Self::elembits();
if 0 == ndx % Self::elembits() {
for i in slice_ndx..slice.len() {
slice[i] = !0;
}
} else if slice_ndx < slice.len() {
let mask = Self::mask(ndx - 1) - 1;
slice[slice_ndx] |= mask;
for i in slice_ndx + 1..slice.len() {
slice[i] = !0;
}
}
}
fn is_true_from(slice: &[Self], ndx: usize) -> bool {
let slice_ndx = ndx / Self::elembits();
if 0 == ndx % Self::elembits() {
for i in slice_ndx..slice.len() {
if slice[i] != !0 {
return false;
}
}
} else if slice_ndx < slice.len() {
let mask = Self::mask(ndx - 1) - 1;
if slice[slice_ndx] | !mask != !0 {
return false;
}
for i in slice_ndx + 1..slice.len() {
if slice[i] != !0 {
return false;
}
}
}
true
}
fn contains(slice: &[Self], prefix: usize, other: &[Self]) -> bool {
let slice_ndx = prefix / Self::elembits();
for i in 0..slice_ndx {
if slice[i] != other[i] {
return false;
}
}
if 0 == prefix % Self::elembits() {
return true;
}
let mask = !(Self::mask(prefix - 1) - 1);
0 == mask & (slice[slice_ndx] ^ other[slice_ndx])
}
}
};
}
impl_big_endian_for! {u8}
impl_big_endian_for! {u16}
impl_big_endian_for! {u32}
impl_big_endian_for! {u64}
#[cfg(test)]
mod tests {
use super::BigEndianBitString;
#[test]
fn shared_prefix() {
assert_eq!(0, u8::shared_prefix_len(&[0b0000_0000], &[0b0000_0000], 0));
assert_eq!(0, u8::shared_prefix_len(&[0b0000_0000], &[0b1000_0000], 8));
assert_eq!(1, u8::shared_prefix_len(&[0b0000_0000], &[0b0000_0000], 1));
assert_eq!(1, u8::shared_prefix_len(&[0b0000_0000], &[0b0100_0000], 8));
assert_eq!(7, u8::shared_prefix_len(&[0b1100_0000], &[0b1100_0001], 7));
assert_eq!(7, u8::shared_prefix_len(&[0b1100_0000], &[0b1100_0001], 8));
assert_eq!(
0,
u8::shared_prefix_len(&[0b0000_0000, 0b0000_0000], &[0b0000_0000, 0b0000_0000], 0)
);
assert_eq!(
0,
u8::shared_prefix_len(&[0b0000_0000, 0b0000_0000], &[0b1000_0000, 0b0000_0000], 8)
);
assert_eq!(
1,
u8::shared_prefix_len(&[0b0000_0000, 0b0000_0000], &[0b0000_0000, 0b0000_0000], 1)
);
assert_eq!(
1,
u8::shared_prefix_len(&[0b0000_0000, 0b0000_0000], &[0b0100_0000, 0b0000_0000], 8)
);
assert_eq!(
7,
u8::shared_prefix_len(&[0b1100_0000, 0b0000_0000], &[0b1100_0001, 0b0000_0000], 7)
);
assert_eq!(
7,
u8::shared_prefix_len(&[0b1100_0000, 0b0000_0000], &[0b1100_0001, 0b0000_0000], 8)
);
assert_eq!(
8,
u8::shared_prefix_len(&[0b0010_1000, 0b0000_0000], &[0b0010_1000, 0b0000_0000], 8)
);
assert_eq!(
8,
u8::shared_prefix_len(&[0b0010_1000, 0b0000_0000], &[0b0010_1000, 0b1000_0000], 16)
);
assert_eq!(
9,
u8::shared_prefix_len(&[0b0010_1000, 0b0000_0000], &[0b0010_1000, 0b0000_0000], 9)
);
assert_eq!(
9,
u8::shared_prefix_len(&[0b0010_1000, 0b0000_0000], &[0b0010_1000, 0b0100_0000], 16)
);
assert_eq!(
15,
u8::shared_prefix_len(&[0b0010_1000, 0b1100_0000], &[0b0010_1000, 0b1100_0001], 15)
);
assert_eq!(
15,
u8::shared_prefix_len(&[0b0010_1000, 0b1100_0000], &[0b0010_1000, 0b1100_0001], 16)
);
}
fn u8_inc<S: AsMut<[u8]>>(mut slice: S, prefix: usize) -> (bool, S) {
let overflow = u8::inc(slice.as_mut(), prefix);
(overflow, slice)
}
#[test]
fn inc() {
// make sure overflow doesn't change the fixed prefix
assert_eq!(
(true, [0b0000_0000, 0b0000_0000]),
u8_inc([0b0000_0000, 0b0000_0000], 16)
);
assert_eq!(
(false, [0b0000_0000, 0b0000_0001]),
u8_inc([0b0000_0000, 0b0000_0000], 15)
);
assert_eq!(
(true, [0b0000_0000, 0b0000_0000]),
u8_inc([0b0000_0000, 0b0000_0001], 15)
);
assert_eq!(
(true, [0b0000_0000, 0b0000_1010]),
u8_inc([0b0000_0000, 0b0000_1011], 15)
);
assert_eq!(
(true, [0b0000_0000, 0b0000_1110]),
u8_inc([0b0000_0000, 0b0000_1111], 15)
);
assert_eq!(
(true, [0b0000_0000, 0b1111_1110]),
u8_inc([0b0000_0000, 0b1111_1111], 15)
);
assert_eq!(
(true, [0b0000_0001, 0b1111_1110]),
u8_inc([0b0000_0001, 0b1111_1111], 15)
);
assert_eq!(
(false, [0b0000_0000, 0b0000_0001]),
u8_inc([0b0000_0000, 0b0000_0000], 8)
);
assert_eq!(
(true, [0b0000_0000, 0b0000_0000]),
u8_inc([0b0000_0000, 0b1111_1111], 8)
);
assert_eq!(
(true, [0b0000_0001, 0b0000_0000]),
u8_inc([0b0000_0001, 0b1111_1111], 8)
);
assert_eq!(
(false, [0b0000_0000, 0b0000_0001]),
u8_inc([0b0000_0000, 0b0000_0000], 0)
);
assert_eq!(
(true, [0b0000_0000, 0b0000_0000]),
u8_inc([0b1111_1111, 0b1111_1111], 0)
);
}
}
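// Illustrative sketch (editor's addition, not part of the original crate): a few
// concrete checks of the big-endian bit indexing described in the trait docs
// above — bit 0 is the *most* significant bit of the first element, so
// `mask(0)` for `u8` is 0x80.
#[cfg(test)]
mod usage_sketch {
    use super::BigEndianBitString;

    #[test]
    fn bit_indexing_is_big_endian() {
        assert_eq!(u8::mask(0), 0b1000_0000);
        assert_eq!(u8::mask(7), 0b0000_0001);

        let mut bits = [0u8; 2];
        u8::on(&mut bits, 0); // highest bit of the first byte
        u8::on(&mut bits, 9); // second-highest bit of the second byte
        assert_eq!(bits, [0b1000_0000, 0b0100_0000]);
        assert!(u8::get(&bits, 9));

        // `contains` compares only the first `prefix` bits.
        assert!(u8::contains(&bits, 2, &[0b1000_0000, 0b0000_0000]));
        assert!(!u8::contains(&bits, 2, &[0b0100_0000, 0b0000_0000]));
    }
}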
| true
|
ab29e2e31d05d9ed817a27a0c70648c7f8693010
|
Rust
|
SINHASantos/tarpc
|
/tarpc/src/server/tokio.rs
|
UTF-8
| 3,304
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
use super::{Channel, Requests, Serve};
use futures::{prelude::*, ready, task::*};
use pin_project::pin_project;
use std::pin::Pin;
/// A future that drives the server by [spawning](tokio::spawn) a [`TokioChannelExecutor`](TokioChannelExecutor)
/// for each new channel. Returned by
/// [`Incoming::execute`](crate::server::incoming::Incoming::execute).
#[must_use]
#[pin_project]
#[derive(Debug)]
pub struct TokioServerExecutor<T, S> {
#[pin]
inner: T,
serve: S,
}
impl<T, S> TokioServerExecutor<T, S> {
pub(crate) fn new(inner: T, serve: S) -> Self {
Self { inner, serve }
}
}
/// A future that drives the server by [spawning](tokio::spawn) each [response
/// handler](super::InFlightRequest::execute) on tokio's default executor. Returned by
/// [`Channel::execute`](crate::server::Channel::execute).
#[must_use]
#[pin_project]
#[derive(Debug)]
pub struct TokioChannelExecutor<T, S> {
#[pin]
inner: T,
serve: S,
}
impl<T, S> TokioServerExecutor<T, S> {
fn inner_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut T> {
self.as_mut().project().inner
}
}
impl<T, S> TokioChannelExecutor<T, S> {
fn inner_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut T> {
self.as_mut().project().inner
}
}
// Send + 'static execution helper methods.
impl<C> Requests<C>
where
C: Channel,
C::Req: Send + 'static,
C::Resp: Send + 'static,
{
/// Executes all requests using the given service function. Requests are handled concurrently
/// by [spawning](::tokio::spawn) each handler on tokio's default executor.
pub fn execute<S>(self, serve: S) -> TokioChannelExecutor<Self, S>
where
S: Serve<C::Req, Resp = C::Resp> + Send + 'static,
{
TokioChannelExecutor { inner: self, serve }
}
}
impl<St, C, Se> Future for TokioServerExecutor<St, Se>
where
St: Sized + Stream<Item = C>,
C: Channel + Send + 'static,
C::Req: Send + 'static,
C::Resp: Send + 'static,
Se: Serve<C::Req, Resp = C::Resp> + Send + 'static + Clone,
Se::Fut: Send,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
while let Some(channel) = ready!(self.inner_pin_mut().poll_next(cx)) {
tokio::spawn(channel.execute(self.serve.clone()));
}
tracing::info!("Server shutting down.");
Poll::Ready(())
}
}
impl<C, S> Future for TokioChannelExecutor<Requests<C>, S>
where
C: Channel + 'static,
C::Req: Send + 'static,
C::Resp: Send + 'static,
S: Serve<C::Req, Resp = C::Resp> + Send + 'static + Clone,
S::Fut: Send,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
while let Some(response_handler) = ready!(self.inner_pin_mut().poll_next(cx)) {
match response_handler {
Ok(resp) => {
let server = self.serve.clone();
tokio::spawn(async move {
resp.execute(server).await;
});
}
Err(e) => {
tracing::warn!("Requests stream errored out: {}", e);
break;
}
}
}
Poll::Ready(())
}
}
| true
|
25e4425617f5393dd7c417a1e2af60998ddb820f
|
Rust
|
Luminiscental/aoc2020
|
/src/day22.rs
|
UTF-8
| 2,695
| 3.25
| 3
|
[] |
no_license
|
use crate::day::Day;
use std::{
cmp::Ordering,
collections::{HashSet, VecDeque},
};
enum Player {
One,
Two,
}
fn score(deck: VecDeque<usize>) -> usize {
deck.iter().rev().zip(1..).map(|pair| pair.0 * pair.1).sum()
}
fn play_recursive(
mut player1: VecDeque<usize>,
mut player2: VecDeque<usize>,
allow_recursion: bool,
) -> (Player, usize) {
let mut states = HashSet::new();
while !player1.is_empty() && !player2.is_empty() {
if allow_recursion && !states.insert((player1.clone(), player2.clone())) {
player2.clear();
break;
}
let (play1, play2) = (player1.pop_front().unwrap(), player2.pop_front().unwrap());
let winner = if !allow_recursion || play1 > player1.len() || play2 > player2.len() {
match play1.cmp(&play2) {
Ordering::Greater => Player::One,
Ordering::Less => Player::Two,
Ordering::Equal => panic!("tie"),
}
} else {
play_recursive(
player1.iter().take(play1).copied().collect(),
player2.iter().take(play2).copied().collect(),
allow_recursion,
)
.0
};
match winner {
Player::One => {
player1.push_back(play1);
player1.push_back(play2);
}
Player::Two => {
player2.push_back(play2);
player2.push_back(play1);
}
}
}
if player2.is_empty() {
(Player::One, score(player1))
} else {
(Player::Two, score(player2))
}
}
pub struct Day22 {}
impl<'a> Day<'a> for Day22 {
type Input1 = (VecDeque<usize>, VecDeque<usize>);
type Input2 = (VecDeque<usize>, VecDeque<usize>);
type Output1 = usize;
type Output2 = usize;
const INDEX: usize = 22;
fn parse(raw_input: &'a str) -> Self::Input1 {
let lines: Vec<_> = raw_input.lines().collect();
let mut chunks = lines.split(|line| line.is_empty());
(
chunks.next().unwrap()[1..]
.iter()
.map(|line| line.parse().unwrap())
.collect(),
chunks.next().unwrap()[1..]
.iter()
.map(|line| line.parse().unwrap())
.collect(),
)
}
fn solve_part1(input: Self::Input1) -> (Self::Input2, Self::Output1) {
let winning_score = play_recursive(input.0.clone(), input.1.clone(), false).1;
(input, winning_score)
}
fn solve_part2(input: Self::Input2) -> Self::Output2 {
play_recursive(input.0, input.1, true).1
}
}
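// Illustrative sketch (editor's addition, not part of the original solution):
// checks the `score` helper against a hand-computed example — the deck
// 3,2,10,6,8,5,9,4,7,1 (top to bottom), as in the Advent of Code 2020 day 22
// example, scores 306.
#[cfg(test)]
mod score_sketch {
    use super::*;

    #[test]
    fn score_weights_cards_from_the_bottom() {
        let deck: VecDeque<usize> = vec![3, 2, 10, 6, 8, 5, 9, 4, 7, 1].into_iter().collect();
        // Bottom card * 1, next card up * 2, ..., top card * deck length.
        assert_eq!(score(deck), 306);
    }
}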
| true
|
66b0da69456edbfb1266fc5d57f5ccdb50169af9
|
Rust
|
opcow/fe_o2
|
/src/cpu65.rs
|
UTF-8
| 48,287
| 3.296875
| 3
|
[] |
no_license
|
use crate::cpu65::Mode::{Abs, Abx, Aby, Acc, Imm, Imp, Ind, Inx, Iny, Rel, Unk, Zpg, Zpx, Zpy};
use std::fmt;
use std::io::{Error, ErrorKind};
const MAX_ADD: usize = 0xffff;
const MEM_SIZE: usize = MAX_ADD + 6;
// registers stored in array
const A_REG: usize = 0x10000;
const X_REG: usize = 0x10001;
const Y_REG: usize = 0x10002;
// so that CPU::load() can return a Result
#[derive(Debug, PartialEq)]
pub enum LoadError {
SegmentAddress,
}
// required for custom error type
impl From<LoadError> for std::io::Error {
fn from(e: LoadError) -> std::io::Error {
match e {
LoadError::SegmentAddress => Error::new(ErrorKind::Other, "Segment address error!"),
}
}
}
// for saving addresses where memory was loaded
pub struct Segment {
pub start: usize,
pub end: usize,
}
// 6502 instruction modes
#[derive(Debug, Copy, Clone)]
pub enum Mode {
Imm, // immediate mode
Abs, // absolute
Abx, // absolute x
Aby, // absolute y
Inx, // indirect x
Iny, // indirect y
Zpg, // zero page
Zpx, // zero page x
Zpy, // zero page y
Acc, // accumulator
Ind, // indirect
Rel, // relative
Imp, // implied
Unk,
}
// the 6502 status register
enum Flag {
C = 1 << 0, // carry
Z = 1 << 1, // zero
I = 1 << 2, // interrupt
D = 1 << 3, // decimal
B = 1 << 4, // break
// U = 1 << 5, // unused
V = 1 << 6, // overflow
N = 1 << 7, // negative
}
/// `CPU` virtual 6502 processor + memory
pub struct CPU {
/// 6502 status register
status: u8,
/// 6502 stack pointer
sp: u8,
/// 6502 program counter
pc: u16,
/// the 64k memory space
mem: [u8; MEM_SIZE],
// 6502 instructions
// ins: [Instruction; 256],
}
/// Constructs a new `CPU`.
///
/// # Examples
///
/// ```
/// use cpu65::CPU;
///
/// let mut cpu = cpu65::CPU::new();
/// ```
impl fmt::Display for CPU {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let ins = INSTRUCTIONS[self.mem[self.pc as usize] as usize];
let opstr = CPU::get_format(
ins.mode,
self.pc as usize,
&self.mem[(self.pc as usize + 1)..=(self.pc as usize + 2)],
);
let operands = match ins.ops {
2 => format!(
"{:>02X} {:>02X}",
self.mem[self.pc as usize + 1],
self.mem[self.pc as usize + 2],
),
1 => format!("{:>02X}", self.mem[self.pc as usize + 1],),
_ => "".to_string(),
};
write!(
f,
"{:>04X} {} {:<10}{:>02X} {:7}|{}| A={:>02X} X={:>02X} Y={:>02X} SP={:>02X}",
self.pc,
ins.mnemonic,
opstr,
ins.opcode,
operands,
status_as_string(self.status),
self.mem[A_REG],
self.mem[X_REG],
self.mem[Y_REG],
self.sp
)
}
}
fn status_as_string(status: u8) -> String {
let mut s = ['N', 'V', 'U', 'B', 'D', 'I', 'Z', 'C'];
let mut bit = 1 << 7;
for i in s.iter_mut() {
if status & bit == 0 {
*i = '_';
}
bit >>= 1;
}
s.iter().collect()
}
impl CPU {
pub fn new() -> CPU {
CPU {
// registers a, x, y are stored in extra bytes of memory
// this makes Addessing modes easier
// (or could be a huge mistake)
status: 0,
sp: 0xff,
pc: 0,
mem: [0; MEM_SIZE],
// ins: INSTRUCTIONS,
}
}
///////////////////////////////////////////////////
/// pub methods for accessing the machine state ///
///////////////////////////////////////////////////
pub fn set_pc(&mut self, pc: u16) {
self.pc = pc;
}
pub fn get_pc(&self) -> usize {
self.pc as usize
}
// gets the absolute address of a relative branch
#[inline(always)]
pub fn branch_addr(&self, pc: isize) -> usize {
(self.mem[pc as usize + 1] as i8 as isize + pc as isize + 2) as usize
}
// get the indirect 16-bit address as usize
#[inline(always)]
pub fn mem_ptr(&self, a: usize) -> usize {
self.mem[a] as usize | (self.mem[a + 1] as usize) << 8
}
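    // Example (illustrative, editor's addition): with mem[0x10] = 0x34 and
    // mem[0x11] = 0x12, `mem_ptr(0x10)` returns 0x1234 — the little-endian
    // 16-bit pointer stored at 0x10. `branch_addr` similarly resolves a signed
    // relative operand against the address of the *next* instruction (pc + 2).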
// pub fn mem_mut(&mut self) -> &mut [u8] {
// &mut self.mem
// }
pub fn mem(&self) -> &[u8] {
&self.mem
}
pub fn step(&mut self) {
let ins = &INSTRUCTIONS[self.mem[self.pc as usize] as usize];
(ins.ef)(self); // call emu fn
self.pc += ins.step; // update program counter
}
pub fn load(&mut self, buf: &[u8], load_add: Option<usize>) -> Result<Vec<Segment>, LoadError> {
let mut start_add: usize;
let mut end_add: usize;
let mut offset: usize;
let mut segs: Vec<Segment> = Vec::new();
match load_add {
Some(add) => {
let seg_end = add + buf.len() - 1;
if add > 0xfffe || seg_end <= add || seg_end > 0xffff {
return Err(LoadError::SegmentAddress);
}
segs.push(Segment {
start: add,
end: seg_end,
});
self.mem[add..=seg_end].clone_from_slice(&buf[0..]);
return Ok(segs);
}
None => {
offset = 2; // skip the header
}
};
while offset < buf.len() {
start_add = (buf[offset] as usize) | (buf[offset + 1] as usize) << 8;
end_add = (buf[offset + 2] as usize) | (buf[offset + 3] as usize) << 8;
if start_add > 0xfffc || end_add > 0xffff || end_add < start_add + 2 {
return Err(LoadError::SegmentAddress);
}
let seg_beg = offset + 4;
let seg_end = seg_beg + (end_add - start_add) + 1;
segs.push(Segment {
start: start_add,
end: end_add,
});
self.mem[start_add..=end_add].clone_from_slice(&buf[seg_beg..seg_end]);
offset = seg_end;
}
Ok(segs)
}
///////////////////////////
/// convenience methods ///
///////////////////////////
#[inline(always)]
fn push_16(&mut self, n: u16) {
self.mem[0x100 + self.sp as usize - 1] = n as u8;
self.mem[0x100 + self.sp as usize] = (n >> 8) as u8;
self.sp = self.sp.wrapping_sub(2);
}
#[inline(always)]
fn pop_16(&mut self) -> u16 {
self.sp = self.sp.wrapping_add(2);
u16::from(self.mem[0x100 + self.sp as usize - 1])
| u16::from(self.mem[0x100 + self.sp as usize]) << 8
}
#[inline(always)]
fn push_8(&mut self, n: u8) {
self.mem[0x100 + self.sp as usize] = n;
self.sp = self.sp.wrapping_sub(1);
}
#[inline(always)]
fn pop_8(&mut self) -> u8 {
self.sp = self.sp.wrapping_add(1);
self.mem[0x100 + self.sp as usize]
}
#[inline(always)]
fn get_mem_16(&self, a: usize) -> u16 {
u16::from(self.mem[a]) | u16::from(self.mem[a + 1]) << 8
}
#[inline(always)]
fn check_status(&self, r: Flag) -> bool {
self.status & r as u8 != 0
}
#[inline(always)]
fn get_op_add(&self) -> usize {
(self.mem[self.pc as usize + 1] as usize) | (self.mem[self.pc as usize + 2] as usize) << 8
}
fn get_eff_add(&self) -> usize {
match INSTRUCTIONS[self.mem[self.pc as usize] as usize].mode {
Imm => self.pc as usize + 1,
Zpg => self.mem[self.pc as usize + 1] as usize,
Abs => {
(self.mem[self.pc as usize + 1] as usize)
| (self.mem[self.pc as usize + 2] as usize) << 8
}
Inx => {
(self.mem[(self.mem[self.pc as usize + 1] + self.mem[X_REG]) as usize] as usize)
| (self.mem[(self.mem[self.pc as usize + 1] + self.mem[X_REG]) as usize + 1]
as usize)
<< 8
}
Iny => self.mem_ptr(self.mem[self.pc as usize + 1] as usize) + self.mem[Y_REG] as usize,
Zpx => (self.mem[self.pc as usize + 1]).wrapping_add(self.mem[X_REG]) as usize,
Zpy => (self.mem[self.pc as usize + 1]).wrapping_add(self.mem[Y_REG]) as usize,
// page wrapping for abx/y?
Abx => {
// let addr = self.get_op_add();
// addr & 0xff00 | (addr as u8).wrapping_add(self.mem[X_REG]) as usize
self.get_op_add() + self.mem[X_REG] as usize
}
Aby => {
// let addr = self.get_op_add();
// addr & 0xff00 | (addr as u8).wrapping_add(self.mem[Y_REG]) as usize
self.get_op_add() + self.mem[Y_REG] as usize
}
Acc => A_REG,
_ => panic!("Instruction mode doesn't target an Addess!"),
}
}
///////////////////////////////////////////////////////////////
/// instruction emulation functions ///
///////////////////////////////////////////////////////////////
// unhandled instruction
pub fn emu_err(&mut self) {
panic!(
"Emulation for instruction {} not implemented!",
INSTRUCTIONS[self.mem[self.pc as usize] as usize].mnemonic
);
}
// bitwise logic
fn emu_and(&mut self) {
self.mem[A_REG] &= self.mem[self.get_eff_add()];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
fn emu_bit(&mut self) {
let target = self.get_eff_add();
if self.mem[A_REG] & self.mem[target] == 0 {
self.status |= Flag::Z as u8;
} else {
self.status &= !(Flag::Z as u8);
}
self.status |= self.mem[target] & 0xC0; // copy N and V from the operand
}
fn emu_eor(&mut self) {
self.mem[A_REG] ^= self.mem[self.get_eff_add()];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
fn emu_ora(&mut self) {
self.mem[A_REG] |= self.mem[self.get_eff_add()];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
// register-to-register transfer operations
fn emu_tax(&mut self) {
self.mem[X_REG] = self.mem[A_REG];
CPU::set_nz_reg(&mut self.status, self.mem[X_REG]);
}
fn emu_tay(&mut self) {
self.mem[Y_REG] = self.mem[A_REG];
CPU::set_nz_reg(&mut self.status, self.mem[Y_REG]);
}
fn emu_txa(&mut self) {
self.mem[A_REG] = self.mem[X_REG];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
fn emu_tya(&mut self) {
self.mem[A_REG] = self.mem[Y_REG];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
// load and store operations
fn emu_lda(&mut self) {
self.mem[A_REG] = self.mem[self.get_eff_add()];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
fn emu_ldx(&mut self) {
self.mem[X_REG] = self.mem[self.get_eff_add()];
CPU::set_nz_reg(&mut self.status, self.mem[X_REG]);
}
fn emu_ldy(&mut self) {
self.mem[Y_REG] = self.mem[self.get_eff_add()];
CPU::set_nz_reg(&mut self.status, self.mem[Y_REG]);
}
fn emu_sta(&mut self) {
self.mem[self.get_eff_add()] = self.mem[A_REG];
}
fn emu_stx(&mut self) {
self.mem[self.get_eff_add()] = self.mem[X_REG];
}
fn emu_sty(&mut self) {
self.mem[self.get_eff_add()] = self.mem[Y_REG];
}
// shift operations
fn emu_asl(&mut self) {
let addr = &mut self.mem[self.get_eff_add()];
self.status |= *addr >> 7;
*addr <<= 1;
}
fn emu_lsr(&mut self) {
let addr = &mut self.mem[self.get_eff_add()];
self.status |= *addr & 1; // bit 0 goes into carry bit
*addr >>= 1;
}
fn emu_rol(&mut self) {
let addr = &mut self.mem[self.get_eff_add()];
self.status |= *addr >> 7; // bit 7 goes into carry bit
*addr <<= 1;
*addr |= self.status & (Flag::C as u8); // carry goes into bit 0
}
fn emu_ror(&mut self) {
let addr = &mut self.mem[self.get_eff_add()];
self.status |= *addr & 1; // bit 0 goes into carry bit
*addr >>= 1;
*addr |= (self.status & (Flag::C as u8)) << 7; // carry goes into bit 7
}
// flow control operations
fn emu_bra(&mut self) {
let status = match self.mem[self.pc as usize] {
0x10 => self.status & Flag::N as u8 == 0,
0x30 => self.status & Flag::N as u8 != 0,
0x50 => self.status & Flag::V as u8 == 0,
0x70 => self.status & Flag::V as u8 != 0,
0x90 => self.status & Flag::C as u8 == 0,
0xb0 => self.status & Flag::C as u8 != 0,
0xd0 => self.status & Flag::Z as u8 == 0,
0xf0 => self.status & Flag::Z as u8 != 0,
// should never happen
_ => panic!("Encountered unknown branch opcode!"),
};
if status {
self.pc =
2 + ((self.pc as i16) + i16::from(self.mem[self.pc as usize + 1] as i8)) as u16;
} else {
self.pc += 2;
}
}
fn emu_jmp(&mut self) {
self.pc = u16::from(self.mem[self.pc as usize + 1])
| u16::from(self.mem[self.pc as usize + 2]) << 8
}
fn emu_jsr(&mut self) {
let t = self.pc + 2;
self.mem[0x100 + self.sp as usize - 1] = t as u8;
self.mem[0x100 + self.sp as usize] = (t >> 8) as u8;
self.sp -= 2;
self.pc = u16::from(self.mem[self.pc as usize + 1])
| u16::from(self.mem[self.pc as usize + 2]) << 8
}
fn emu_rts(&mut self) {
self.sp = self.sp.wrapping_add(2);
self.pc = (u16::from(self.mem[0x100 + self.sp as usize - 1])
| (u16::from(self.mem[0x100 + self.sp as usize]) << 8))
+ 1;
}
// increment and decrement operations
fn emu_dec(&mut self) {
let target = self.get_eff_add();
self.mem[target] = self.mem[target].wrapping_sub(1);
CPU::set_nz_reg(&mut self.status, self.mem[target]);
}
fn emu_dex(&mut self) {
self.mem[X_REG] = self.mem[X_REG].wrapping_sub(1);
CPU::set_nz_reg(&mut self.status, self.mem[X_REG]);
}
fn emu_dey(&mut self) {
self.mem[Y_REG] = self.mem[Y_REG].wrapping_sub(1);
CPU::set_nz_reg(&mut self.status, self.mem[Y_REG]);
}
fn emu_inc(&mut self) {
let target = self.get_eff_add();
self.mem[target] = self.mem[target].wrapping_add(1);
CPU::set_nz_reg(&mut self.status, self.mem[target]);
}
fn emu_inx(&mut self) {
self.mem[X_REG] = self.mem[X_REG].wrapping_add(1);
CPU::set_nz_reg(&mut self.status, self.mem[X_REG]);
}
fn emu_iny(&mut self) {
self.mem[Y_REG] = self.mem[Y_REG].wrapping_add(1);
CPU::set_nz_reg(&mut self.status, self.mem[Y_REG]);
}
// addition and subtraction
fn emu_adc(&mut self) {
//fix me check adc and sbc for correctness
let target = self.get_eff_add();
let r = self.mem[A_REG]
.overflowing_add(self.mem[target] + if self.check_status(Flag::C) { 1 } else { 0 });
// set carry if wraps
if r.1 {
self.status |= Flag::C as u8
} else {
self.status &= !(Flag::C as u8)
}
// set overflow
if (self.mem[A_REG] ^ r.0) & (self.mem[target] ^ r.0) & 0x80 != 0 {
// if (r.0 ^ self.mem[A_REG]) & 0x80 != 0 {
self.status |= Flag::V as u8
} else {
self.status &= !(Flag::V as u8)
}
self.mem[A_REG] = r.0;
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
fn emu_sbc(&mut self) {
let target = self.get_eff_add();
let r = self.mem[A_REG]
.overflowing_sub(self.mem[target] + if self.check_status(Flag::C) { 0 } else { 1 });
// set carry if wraps
if r.1 {
self.status |= Flag::C as u8
} else {
self.status &= !(Flag::C as u8)
}
// set overflow
if (self.mem[A_REG] ^ r.0) & ((self.mem[target] ^ 0xff) ^ r.0) & 0x80 != 0 {
self.status |= Flag::V as u8
} else {
self.status &= !(Flag::V as u8)
}
self.mem[A_REG] = r.0;
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
// comparison operations
fn emu_cmp(&mut self) {
let r: i8 = self.mem[A_REG] as i8 - self.mem[self.get_eff_add()] as i8;
CPU::set_nz_reg(&mut self.status, r as u8);
if r >= 0 {
self.status |= Flag::C as u8
} else {
self.status &= !(Flag::C as u8)
}
}
fn emu_cpx(&mut self) {
let r: i8 = self.mem[X_REG] as i8 - self.mem[self.get_eff_add()] as i8;
CPU::set_nz_reg(&mut self.status, r as u8);
if r >= 0 {
self.status |= Flag::C as u8
} else {
self.status &= !(Flag::C as u8)
}
}
fn emu_cpy(&mut self) {
let r: i8 = self.mem[Y_REG] as i8 - self.mem[self.get_eff_add()] as i8;
CPU::set_nz_reg(&mut self.status, r as u8);
if r >= 0 {
self.status |= Flag::C as u8
} else {
self.status &= !(Flag::C as u8)
}
}
// stack operations
fn emu_pha(&mut self) {
self.mem[0x100 + self.sp as usize] = self.mem[A_REG];
self.sp -= 1;
}
fn emu_php(&mut self) {
self.mem[0x100 + self.sp as usize] = self.status;
self.sp -= 1;
}
fn emu_pla(&mut self) {
self.sp += 1;
self.mem[A_REG] = self.mem[0x100 + self.sp as usize];
// let r = self.mem[A_REG];
CPU::set_nz_reg(&mut self.status, self.mem[A_REG]);
}
fn emu_plp(&mut self) {
self.sp += 1;
self.status = self.mem[0x100 + self.sp as usize];
}
fn emu_tsx(&mut self) {
self.mem[X_REG] = self.sp;
CPU::set_nz_reg(&mut self.status, self.mem[X_REG]);
}
fn emu_txs(&mut self) {
self.sp = self.mem[X_REG];
}
// status register operations
fn emu_clc(&mut self) {
self.status &= !(Flag::C as u8);
}
fn emu_cld(&mut self) {
self.status &= !(Flag::D as u8);
}
fn emu_cli(&mut self) {
self.status &= !(Flag::I as u8);
}
fn emu_clv(&mut self) {
self.status &= !(Flag::V as u8);
}
fn emu_sec(&mut self) {
self.status |= Flag::C as u8;
}
fn emu_sed(&mut self) {
self.status |= Flag::D as u8;
}
fn emu_sei(&mut self) {
self.status |= Flag::I as u8;
}
// interrupt related
fn emu_rti(&mut self) {
self.status = self.pop_8();
self.pc = self.pop_16();
}
fn emu_brk(&mut self) {
// let pc = self.pc as u16;
// let st = self.status;
self.push_16(self.pc);
self.push_8(self.status);
self.pc = self.get_mem_16(0xfffe);
self.status |= Flag::B as u8;
}
// nope
fn emu_nop(&mut self) {}
}
impl CPU {
// formatting for printing an instruction
pub fn get_format(m: Mode, addr: usize, ops: &[u8]) -> String {
match m {
Imm => format!("#${:>02X}", ops[0]),
Zpg => format!("${:>02X}", ops[0]),
Zpx => format!("${:>02X},X", ops[0]),
Zpy => format!("${:>02X},Y", ops[0]),
Abs => format!("${:>02X}{:>02X}", ops[1], ops[0]),
Abx => format!("${:>02X}{:>02X},X", ops[1], ops[0]),
Aby => format!("${:>02X}{:>02X},Y", ops[1], ops[0]),
Ind => format!("(${:>02X}{:>02X})", ops[1], ops[0]),
Inx => format!("(${:>02X},X)", ops[0]),
Iny => format!("(${:>02X}),Y", ops[0]),
Acc => "A".to_string(),
Rel => format!(
"${:>04X}",
(ops[0] as i8 as isize + addr as isize + 2) as usize
),
Imp => "".to_string(),
Unk => "---".to_string(),
}
}
#[inline(always)]
fn set_nz_reg(status: &mut u8, r: u8) {
if r == 0 {
*status |= Flag::Z as u8;
*status &= !(Flag::N as u8);
} else {
*status &= !(Flag::Z as u8);
*status = *status & 0x7f | (r & 0x80);
}
}
}
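// Illustrative sketch (editor's addition, not part of the original emulator):
// a quick check of the status-register helpers above, assuming the flag layout
// defined by the `Flag` enum (N in bit 7, Z in bit 1, C in bit 0).
#[cfg(test)]
mod status_sketch {
    use super::*;

    #[test]
    fn status_formatting_and_nz_flags() {
        // N and C set, everything else clear.
        assert_eq!(status_as_string(0b1000_0001), "N______C");

        // A zero result sets Z and clears N; a negative result does the opposite.
        let mut status = 0u8;
        CPU::set_nz_reg(&mut status, 0x00);
        assert_eq!(status & 0b0000_0010, 0b0000_0010); // Z set
        CPU::set_nz_reg(&mut status, 0x80);
        assert_eq!(status & 0b1000_0000, 0b1000_0000); // N set
        assert_eq!(status & 0b0000_0010, 0); // Z cleared
    }
}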
#[derive(Copy, Clone)]
pub struct Instruction {
pub opcode: i32, // the opcode
pub ef: fn(&mut CPU), // the thing that gets the result
pub step: u16, // how much to move the pc after this instruction
pub ops: i32,
pub mode: Mode,
pub isbr: bool,
pub mnemonic: &'static str,
}
// use cpu65::Modes::*;
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const INSTRUCTIONS: [Instruction; 256] = [
Instruction { opcode: 0x00, ef: CPU::emu_brk, step: 0, ops: 0, mode: Imp, isbr: false, mnemonic: "BRK", },
Instruction { opcode: 0x01, ef: CPU::emu_ora, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x02, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x03, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x04, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x05, ef: CPU::emu_ora, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x06, ef: CPU::emu_asl, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "ASL", },
Instruction { opcode: 0x07, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x08, ef: CPU::emu_php, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "PHP", },
Instruction { opcode: 0x09, ef: CPU::emu_ora, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x0a, ef: CPU::emu_asl, step: 1, ops: 0, mode: Acc, isbr: false, mnemonic: "ASL", },
Instruction { opcode: 0x0b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x0c, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x0d, ef: CPU::emu_ora, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x0e, ef: CPU::emu_asl, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "ASL", },
Instruction { opcode: 0x0f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x10, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BPL", },
Instruction { opcode: 0x11, ef: CPU::emu_ora, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x12, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x13, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x14, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x15, ef: CPU::emu_ora, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x16, ef: CPU::emu_asl, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "ASL", },
Instruction { opcode: 0x17, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x18, ef: CPU::emu_clc, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "CLC", },
Instruction { opcode: 0x19, ef: CPU::emu_ora, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x1a, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x1b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x1c, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x1d, ef: CPU::emu_ora, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "ORA", },
Instruction { opcode: 0x1e, ef: CPU::emu_asl, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "ASL", },
Instruction { opcode: 0x1f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x20, ef: CPU::emu_jsr, step: 0, ops: 2, mode: Abs, isbr: true, mnemonic: "JSR", },
Instruction { opcode: 0x21, ef: CPU::emu_and, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x22, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x23, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x24, ef: CPU::emu_bit, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "BIT", },
Instruction { opcode: 0x25, ef: CPU::emu_and, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x26, ef: CPU::emu_rol, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "ROL", },
Instruction { opcode: 0x27, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x28, ef: CPU::emu_plp, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "PLP", },
Instruction { opcode: 0x29, ef: CPU::emu_and, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x2a, ef: CPU::emu_rol, step: 1, ops: 0, mode: Acc, isbr: false, mnemonic: "ROL", },
Instruction { opcode: 0x2b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x2c, ef: CPU::emu_bit, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "BIT", },
Instruction { opcode: 0x2d, ef: CPU::emu_and, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x2e, ef: CPU::emu_rol, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "ROL", },
Instruction { opcode: 0x2f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x30, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BMI", },
Instruction { opcode: 0x31, ef: CPU::emu_and, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x32, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x33, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x34, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x35, ef: CPU::emu_and, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x36, ef: CPU::emu_rol, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "ROL", },
Instruction { opcode: 0x37, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x38, ef: CPU::emu_sec, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "SEC", },
Instruction { opcode: 0x39, ef: CPU::emu_and, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x3a, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x3b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x3c, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x3d, ef: CPU::emu_and, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "AND", },
Instruction { opcode: 0x3e, ef: CPU::emu_rol, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "ROL", },
Instruction { opcode: 0x3f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x40, ef: CPU::emu_rti, step: 0, ops: 0, mode: Imp, isbr: false, mnemonic: "RTI", },
Instruction { opcode: 0x41, ef: CPU::emu_eor, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x42, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x43, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x44, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x45, ef: CPU::emu_eor, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x46, ef: CPU::emu_lsr, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "LSR", },
Instruction { opcode: 0x47, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x48, ef: CPU::emu_pha, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "PHA", },
Instruction { opcode: 0x49, ef: CPU::emu_eor, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x4a, ef: CPU::emu_lsr, step: 1, ops: 0, mode: Acc, isbr: false, mnemonic: "LSR", },
Instruction { opcode: 0x4b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x4c, ef: CPU::emu_jmp, step: 0, ops: 2, mode: Abs, isbr: true, mnemonic: "JMP", },
Instruction { opcode: 0x4d, ef: CPU::emu_eor, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x4e, ef: CPU::emu_lsr, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "LSR", },
Instruction { opcode: 0x4f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x50, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BVC", },
Instruction { opcode: 0x51, ef: CPU::emu_eor, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x52, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x53, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x54, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x55, ef: CPU::emu_eor, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x56, ef: CPU::emu_lsr, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "LSR", },
Instruction { opcode: 0x57, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x58, ef: CPU::emu_cli, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "CLI", },
Instruction { opcode: 0x59, ef: CPU::emu_eor, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x5a, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x5b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x5c, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x5d, ef: CPU::emu_eor, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "EOR", },
Instruction { opcode: 0x5e, ef: CPU::emu_lsr, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "LSR", },
Instruction { opcode: 0x5f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x60, ef: CPU::emu_rts, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "RTS", },
Instruction { opcode: 0x61, ef: CPU::emu_adc, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x62, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x63, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x64, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x65, ef: CPU::emu_adc, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x66, ef: CPU::emu_ror, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "ROR", },
Instruction { opcode: 0x67, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x68, ef: CPU::emu_pla, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "PLA", },
Instruction { opcode: 0x69, ef: CPU::emu_adc, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x6a, ef: CPU::emu_ror, step: 1, ops: 0, mode: Acc, isbr: false, mnemonic: "ROR", },
Instruction { opcode: 0x6b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x6c, ef: CPU::emu_err, step: 0, ops: 2, mode: Ind, isbr: true, mnemonic: "JMP", },
Instruction { opcode: 0x6d, ef: CPU::emu_adc, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x6e, ef: CPU::emu_ror, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "ROR", },
Instruction { opcode: 0x6f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x70, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BVS", },
Instruction { opcode: 0x71, ef: CPU::emu_adc, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x72, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x73, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x74, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x75, ef: CPU::emu_adc, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x76, ef: CPU::emu_ror, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "ROR", },
Instruction { opcode: 0x77, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x78, ef: CPU::emu_sei, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "SEI", },
Instruction { opcode: 0x79, ef: CPU::emu_adc, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x7a, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x7b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x7c, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x7d, ef: CPU::emu_adc, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "ADC", },
Instruction { opcode: 0x7e, ef: CPU::emu_ror, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "ROR", },
Instruction { opcode: 0x7f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x80, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x81, ef: CPU::emu_sta, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x82, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x83, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x84, ef: CPU::emu_sty, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "STY", },
Instruction { opcode: 0x85, ef: CPU::emu_sta, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x86, ef: CPU::emu_stx, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "STX", },
Instruction { opcode: 0x87, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x88, ef: CPU::emu_dey, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "DEY", },
Instruction { opcode: 0x89, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x8a, ef: CPU::emu_txa, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "TXA", },
Instruction { opcode: 0x8b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x8c, ef: CPU::emu_sty, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "STY", },
Instruction { opcode: 0x8d, ef: CPU::emu_sta, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x8e, ef: CPU::emu_stx, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "STX", },
Instruction { opcode: 0x8f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x90, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BCC", },
Instruction { opcode: 0x91, ef: CPU::emu_sta, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x92, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x93, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x94, ef: CPU::emu_sty, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "STY", },
Instruction { opcode: 0x95, ef: CPU::emu_sta, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x96, ef: CPU::emu_stx, step: 2, ops: 1, mode: Zpy, isbr: false, mnemonic: "STX", },
Instruction { opcode: 0x97, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x98, ef: CPU::emu_tya, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "TYA", },
Instruction { opcode: 0x99, ef: CPU::emu_sta, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x9a, ef: CPU::emu_txs, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "TXS", },
Instruction { opcode: 0x9b, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x9c, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x9d, ef: CPU::emu_sta, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "STA", },
Instruction { opcode: 0x9e, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0x9f, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xa0, ef: CPU::emu_ldy, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "LDY", },
Instruction { opcode: 0xa1, ef: CPU::emu_lda, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xa2, ef: CPU::emu_ldx, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "LDX", },
Instruction { opcode: 0xa3, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xa4, ef: CPU::emu_ldy, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "LDY", },
Instruction { opcode: 0xa5, ef: CPU::emu_lda, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xa6, ef: CPU::emu_ldx, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "LDX", },
Instruction { opcode: 0xa7, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xa8, ef: CPU::emu_tay, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "TAY", },
Instruction { opcode: 0xa9, ef: CPU::emu_lda, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xaa, ef: CPU::emu_tax, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "TAX", },
Instruction { opcode: 0xab, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xac, ef: CPU::emu_ldy, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "LDY", },
Instruction { opcode: 0xad, ef: CPU::emu_lda, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xae, ef: CPU::emu_ldx, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "LDX", },
Instruction { opcode: 0xaf, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xb0, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BCS", },
Instruction { opcode: 0xb1, ef: CPU::emu_lda, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xb2, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xb3, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xb4, ef: CPU::emu_ldy, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "LDY", },
Instruction { opcode: 0xb5, ef: CPU::emu_lda, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xb6, ef: CPU::emu_ldx, step: 2, ops: 1, mode: Zpy, isbr: false, mnemonic: "LDX", },
Instruction { opcode: 0xb7, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xb8, ef: CPU::emu_clv, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "CLV", },
Instruction { opcode: 0xb9, ef: CPU::emu_lda, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xba, ef: CPU::emu_tsx, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "TSX", },
Instruction { opcode: 0xbb, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xbc, ef: CPU::emu_ldy, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "LDY", },
Instruction { opcode: 0xbd, ef: CPU::emu_lda, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "LDA", },
Instruction { opcode: 0xbe, ef: CPU::emu_ldx, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "LDX", },
Instruction { opcode: 0xbf, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xc0, ef: CPU::emu_cpy, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "CPY", },
Instruction { opcode: 0xc1, ef: CPU::emu_cmp, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xc2, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xc3, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xc4, ef: CPU::emu_cpy, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "CPY", },
Instruction { opcode: 0xc5, ef: CPU::emu_cmp, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xc6, ef: CPU::emu_dec, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "DEC", },
Instruction { opcode: 0xc7, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xc8, ef: CPU::emu_iny, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "INY", },
Instruction { opcode: 0xc9, ef: CPU::emu_cmp, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xca, ef: CPU::emu_dex, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "DEX", },
Instruction { opcode: 0xcb, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xcc, ef: CPU::emu_cpy, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "CPY", },
Instruction { opcode: 0xcd, ef: CPU::emu_cmp, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xce, ef: CPU::emu_dec, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "DEC", },
Instruction { opcode: 0xcf, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xd0, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BNE", },
Instruction { opcode: 0xd1, ef: CPU::emu_cmp, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xd2, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xd3, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xd4, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xd5, ef: CPU::emu_cmp, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xd6, ef: CPU::emu_dec, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "DEC", },
Instruction { opcode: 0xd7, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xd8, ef: CPU::emu_cld, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "CLD", },
Instruction { opcode: 0xd9, ef: CPU::emu_cmp, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xda, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xdb, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xdc, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xdd, ef: CPU::emu_cmp, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "CMP", },
Instruction { opcode: 0xde, ef: CPU::emu_dec, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "DEC", },
Instruction { opcode: 0xdf, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xe0, ef: CPU::emu_cpx, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "CPX", },
Instruction { opcode: 0xe1, ef: CPU::emu_sbc, step: 2, ops: 1, mode: Inx, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xe2, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xe3, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xe4, ef: CPU::emu_cpx, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "CPX", },
Instruction { opcode: 0xe5, ef: CPU::emu_sbc, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xe6, ef: CPU::emu_inc, step: 2, ops: 1, mode: Zpg, isbr: false, mnemonic: "INC", },
Instruction { opcode: 0xe7, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xe8, ef: CPU::emu_inx, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "INX", },
Instruction { opcode: 0xe9, ef: CPU::emu_sbc, step: 2, ops: 1, mode: Imm, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xea, ef: CPU::emu_nop, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "NOP", },
Instruction { opcode: 0xeb, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xec, ef: CPU::emu_cpx, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "CPX", },
Instruction { opcode: 0xed, ef: CPU::emu_sbc, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xee, ef: CPU::emu_inc, step: 3, ops: 2, mode: Abs, isbr: false, mnemonic: "INC", },
Instruction { opcode: 0xef, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xf0, ef: CPU::emu_bra, step: 0, ops: 1, mode: Rel, isbr: true, mnemonic: "BEQ", },
Instruction { opcode: 0xf1, ef: CPU::emu_sbc, step: 2, ops: 1, mode: Iny, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xf2, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xf3, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xf4, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xf5, ef: CPU::emu_sbc, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xf6, ef: CPU::emu_inc, step: 2, ops: 1, mode: Zpx, isbr: false, mnemonic: "INC", },
Instruction { opcode: 0xf7, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xf8, ef: CPU::emu_sed, step: 1, ops: 0, mode: Imp, isbr: false, mnemonic: "SED", },
Instruction { opcode: 0xf9, ef: CPU::emu_sbc, step: 3, ops: 2, mode: Aby, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xfa, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xfb, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xfc, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
Instruction { opcode: 0xfd, ef: CPU::emu_sbc, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "SBC", },
Instruction { opcode: 0xfe, ef: CPU::emu_inc, step: 3, ops: 2, mode: Abx, isbr: false, mnemonic: "INC", },
Instruction { opcode: 0xff, ef: CPU::emu_err, step: 0, ops: 0, mode: Unk, isbr: false, mnemonic: "---", },
];
| true
|
ebd464ee0e54776dace178405cceaf7b978f48bd
|
Rust
|
ra2003/build-fs-tree
|
/test/lib.rs
|
UTF-8
| 5,323
| 3
| 3
|
[
"MIT"
] |
permissive
|
use build_fs_tree::*;
use derive_more::{AsRef, Deref};
use maplit::btreemap;
use pipe_trait::Pipe;
use pretty_assertions::assert_eq;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use std::{
collections,
env::temp_dir,
ffi::OsString,
fs::{create_dir, read_dir, read_to_string, remove_dir_all},
io::Error,
path::{Path, PathBuf},
};
use text_block_macros::text_block_fnl;
use FileSystemTree::{Directory, File};
/// Representation of a temporary filesystem item.
///
/// **NOTE:** Delete this once https://github.com/samgiles/rs-mktemp/issues/8 is resolved.
#[derive(Debug, AsRef, Deref)]
pub struct Temp(PathBuf);
impl Temp {
/// Create a temporary directory.
pub fn new_dir() -> Result<Self, Error> {
let path = thread_rng()
.sample_iter(&Alphanumeric)
.take(15)
.map(char::from)
.collect::<String>()
.pipe(|name| temp_dir().join(name));
if path.exists() {
return Self::new_dir();
}
create_dir(&path)?;
path.pipe(Temp).pipe(Ok)
}
}
impl Drop for Temp {
fn drop(&mut self) {
let path = &self.0;
if let Err(error) = remove_dir_all(path) {
eprintln!("warning: Failed to delete {:?}: {}", path, error);
}
}
}
/// YAML representation of a sample tree.
pub const SAMPLE_YAML: &str = text_block_fnl! {
"---"
"a:"
" abc: {}"
" def: content of a/def"
"b:"
" foo:"
" bar: content of b/foo/bar"
};
/// Create a sample tree.
pub fn sample_tree<Path, FileContent>() -> FileSystemTree<Path, FileContent>
where
Path: Ord,
&'static str: Into<Path> + Into<FileContent>,
{
Directory(btreemap! {
"a".into() => Directory(btreemap! {
"abc".into() => Directory(btreemap! {}),
"def".into() => File("content of a/def".into()),
}),
"b".into() => Directory(btreemap! {
"foo".into() => Directory(btreemap! {
"bar".into() => File("content of b/foo/bar".into()),
}),
}),
})
}
/// Create a sample tree (but with `dir!` and `file!` macros).
#[macro_export]
macro_rules! sample_tree {
() => {
dir! {
"a" => dir! {
"abc" => dir! {}
"def" => file!("content of a/def")
}
"b" => dir! {
"foo" => dir! {
"bar" => file!("content of b/foo/bar")
}
}
}
};
}
/// Create a temporary folder.
pub fn create_temp_dir() -> Temp {
Temp::new_dir().expect("create a temporary directory")
}
/// Create a set of `String` from `str` slices.
#[macro_export]
macro_rules! string_set {
($($element:expr),* $(,)?) => {
::maplit::btreeset! { $(::std::string::String::from($element)),* }
};
}
/// List names of children of a directory.
pub fn list_children_names(path: impl AsRef<Path>) -> collections::BTreeSet<String> {
read_dir(path)
.expect("read_dir")
.into_iter()
.filter_map(Result::ok)
.map(|entry| entry.file_name())
.map(OsString::into_string)
.filter_map(Result::ok)
.collect()
}
/// Read content of a text file.
pub fn read_text_file(path: impl AsRef<Path>) -> String {
read_to_string(path).expect("read_to_string")
}
/// Assert that a directory only has certain children.
#[macro_export]
macro_rules! assert_dir {
($path:expr, $expected:expr $(,)?) => {
match (crate::list_children_names($path), $expected) {
(actual, expected) => {
eprintln!("CASE: {} => {}", stringify!($path), stringify!($expected));
dbg!(&actual, &expected);
assert_eq!(
actual,
expected,
"{} => {}",
stringify!($path),
stringify!($expected),
);
}
}
};
}
/// Assert that content of a file is a certain text.
#[macro_export]
macro_rules! assert_file {
($path:expr, $expected:expr $(,)?) => {
match (crate::read_text_file($path), $expected) {
(actual, expected) => {
eprintln!("CASE: {} => {}", stringify!($path), stringify!($expected));
dbg!(&actual, &expected);
assert_eq!(
actual,
expected,
"{} => {}",
stringify!($path),
stringify!($expected),
);
}
}
};
}
/// Test the structure of an actual filesystem tree
pub fn test_sample_tree(root: &Path) {
assert_dir!(root, string_set!("a", "b"));
assert_dir!(root.join("a"), string_set!("abc", "def"));
assert_dir!(root.join("a").join("abc"), string_set!());
assert_file!(root.join("a").join("def"), "content of a/def");
assert_dir!(root.join("b"), string_set!("foo"));
assert_dir!(root.join("b").join("foo"), string_set!("bar"));
assert_file!(
root.join("b").join("foo").join("bar"),
"content of b/foo/bar",
);
}
#[cfg(test)]
mod build;
#[cfg(test)]
mod completions;
#[cfg(test)]
mod macros;
#[cfg(test)]
mod program;
#[cfg(test)]
mod tree;
#[cfg(test)]
mod yaml;
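// Illustrative sketch (not part of the original helper set): a minimal check
// that exercises `create_temp_dir` together with the `assert_dir!` and
// `string_set!` helpers defined above. A freshly created temporary directory
// is expected to have no children.
#[cfg(test)]
mod helper_sketch {
    use super::*;

    #[test]
    fn temp_dir_starts_empty() {
        let temp = create_temp_dir();
        assert_dir!(&*temp, string_set!());
    }
}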
| true
|
64d3237bafb22a849c014e36ac3d39ab40d024b8
|
Rust
|
dcchut/fwatch
|
/src/lib.rs
|
UTF-8
| 8,750
| 3.59375
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use std::path::PathBuf;
use std::time::SystemTime;
/// The base watchable trait.
pub trait Watchable {
/// The path associated with the watchable object
fn path(&self) -> &PathBuf;
}
/// A standalone implementation of the watchable trait.
#[derive(Debug, Clone)]
pub struct BasicTarget {
/// The path we want to watch
path: PathBuf,
}
impl BasicTarget {
pub fn new<T: Into<PathBuf>>(path: T) -> Self {
Self { path: path.into() }
}
}
impl Watchable for BasicTarget {
fn path(&self) -> &PathBuf {
&self.path
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
/// State transitions that a watchable may undergo.
pub enum Transition {
Created,
Modified,
Deleted,
None,
}
#[derive(Debug, Eq, PartialEq)]
/// The current state of the watchable.
pub enum WatchState {
DoesNotExist,
Exists(Option<SystemTime>),
}
#[derive(Debug, Default)]
/// A watcher instance.
///
/// An instance of Watcher keeps track of a vector of watchables and their corresponding states.
pub struct Watcher<W: Watchable> {
targets: Vec<W>,
states: Vec<WatchState>,
}
fn compute_state<W: Watchable>(target: &W) -> WatchState {
// Does the specified path exist
let file_exists = target.path().exists();
// Compute the last modification date of this file, if possible
let mut last_modified_date = None;
if file_exists {
// Determine the last modification time of this file
let metadata = std::fs::metadata(target.path());
if let Ok(metadata) = metadata {
if let Ok(modified) = metadata.modified() {
last_modified_date = Some(modified);
}
}
}
if file_exists {
WatchState::Exists(last_modified_date)
} else {
WatchState::DoesNotExist
}
}
impl<W: Watchable> Watcher<W> {
/// Create a new watcher instance.
///
/// # Examples
///
/// ```
/// use fwatch::{BasicTarget, Watcher};
///
/// fn main() {
/// let mut watcher: Watcher<BasicTarget> = Watcher::new();
/// }
/// ```
pub fn new() -> Self {
Watcher {
targets: Vec::new(),
states: Vec::new(),
}
}
/// Adds a target to the watcher.
///
/// # Examples
///
/// ```
/// use fwatch::{BasicTarget, Watcher};
///
/// fn main() {
/// let mut watcher : Watcher<BasicTarget> = Watcher::new();
///
/// // Watch the "foo.txt" path
/// watcher.add_target(BasicTarget::new("foo.txt"));
/// }
/// ```
pub fn add_target(&mut self, target: W) {
self.states.push(compute_state(&target));
self.targets.push(target);
}
/// Remove a target from the watcher.
///
    /// Returns `false` without removing anything if `index` is out of bounds of `self.states` / `self.targets`.
///
/// # Examples
///
/// ```
/// use fwatch::{BasicTarget, Watcher};
///
/// fn main() {
/// let mut watcher : Watcher<BasicTarget> = Watcher::new();
///
/// // Inserts "foo.txt" at index 0
/// watcher.add_target(BasicTarget::new("foo.txt"));
///
/// // Remove "foo.txt" from the watcher
/// assert!(watcher.remove_target(0));
/// }
/// ```
pub fn remove_target(&mut self, index: usize) -> bool {
        if index >= self.states.len() {
false
} else {
self.states.remove(index);
self.targets.remove(index);
true
}
}
/// Attempt to get the state corresponding to the given target index.
///
/// Note that this doesn't update the current state.
///
/// # Examples
///
/// ```no_run
/// use fwatch::{BasicTarget, Watcher, WatchState};
///
/// fn main() {
/// let mut watcher : Watcher<BasicTarget> = Watcher::new();
///
/// // Watch a file that doesn't exist
/// watcher.add_target(BasicTarget::new("does_not_exist.txt"));
/// assert_eq!(watcher.get_state(0).unwrap(), &WatchState::DoesNotExist);
///
/// // Watch a file that does exist
/// watcher.add_target(BasicTarget::new("exists.txt"));
/// assert_ne!(watcher.get_state(1).unwrap(), &WatchState::DoesNotExist);
/// }
/// ```
pub fn get_state(&self, index: usize) -> Option<&WatchState> {
self.states.get(index)
}
/// Attempt to get the path corresponding to the given target index.
///
/// # Examples
///
/// ```
/// use fwatch::{BasicTarget, Watcher, WatchState};
///
/// fn main() {
/// let mut watcher : Watcher<BasicTarget> = Watcher::new();
/// watcher.add_target(BasicTarget::new("foo.txt"));
///
/// let path = watcher.get_path(0).unwrap();
/// assert_eq!(path.to_str().unwrap(), "foo.txt");
/// }
/// ```
pub fn get_path(&self, index: usize) -> Option<&PathBuf> {
        self.targets.get(index).map(|v| v.path())
}
/// Observe any state transitions in our targets.
///
/// Returns a vector containing the observed state transition for each target.
///
/// # Examples
///
/// ```
/// use fwatch::{BasicTarget, Watcher, Transition};
///
/// fn main() {
/// let mut watcher : Watcher<BasicTarget> = Watcher::new();
///
/// // Watch a file that doesn't exist
/// watcher.add_target(BasicTarget::new("does_not_exist.txt"));
///
/// let results = watcher.watch();
///
    ///     for (index, transition) in results.into_iter().enumerate() {
/// // Get a reference to the path and state of the current target
/// let path = watcher.get_path(index).unwrap();
/// let state = watcher.get_state(index).unwrap();
///
/// match transition {
/// Transition::Created => { /* The watched file has been created */ },
/// Transition::Modified => { /* The watched file has been modified */ },
/// Transition::Deleted => { /* The watched file has been deleted */ },
/// Transition::None => { /* None of the above transitions were observed */ },
/// }
/// }
/// }
/// ```
pub fn watch(&mut self) -> Vec<Transition> {
let mut result = Vec::new();
for (index, target) in self.targets.iter().enumerate() {
let previous_state = self.states.get(index).unwrap();
let current_state = compute_state(target);
let mut transition = Transition::None;
// Check for state transitions
            match (previous_state, &current_state) {
// The file was created
(WatchState::DoesNotExist, WatchState::Exists(_)) => {
transition = Transition::Created;
}
// The file was deleted
(WatchState::Exists(_), WatchState::DoesNotExist) => {
transition = Transition::Deleted;
}
// The file was modified
(WatchState::Exists(Some(t1)), WatchState::Exists(Some(t2))) if t1 != t2 => {
transition = Transition::Modified;
}
_ => {}
};
// now update our state vector
*self.states.get_mut(index).unwrap() = current_state;
result.push(transition);
}
result
}
}
#[cfg(test)]
mod tests {
use crate::{BasicTarget, Transition, Watcher};
use std::io::{Error, Write};
use std::thread::sleep;
use std::time::Duration;
use tempfile::NamedTempFile;
#[test]
/// Creates a temporary file and tests the modification + deletion transitions
fn transitions() -> Result<(), Error> {
let mut watcher: Watcher<BasicTarget> = Watcher::new();
// Open a named temporary file & add it to our watcher
let tmp = NamedTempFile::new()?;
watcher.add_target(BasicTarget::new(tmp.path()));
// We're going to modify our temporary file - to ensure the modification time
        // on our temporary file changes, we wait a bit over a second before modifying
sleep(Duration::from_millis(1500));
{
let mut handle = tmp.reopen()?;
write!(handle, "test")?;
}
// The watcher should notice the modification transition
assert_eq!(watcher.watch(), vec![Transition::Modified]);
// Delete the temporary file
tmp.close()?;
// The watcher should observe the deletion transition
assert_eq!(watcher.watch(), vec![Transition::Deleted]);
Ok(())
}
}
| true
|
9e9f6cb601731e7e7930038b9d6da478bc017adf
|
Rust
|
hukumka/aoc19
|
/src/bin/2.rs
|
UTF-8
| 1,624
| 3.578125
| 4
|
[] |
no_license
|
use aoc19::get_input;
fn main() {
let data: Vec<_> = get_input(2)
.trim()
.split(',')
.map(|s| s.parse::<usize>().unwrap())
.collect();
println!("part1: {}", part1(data.clone()));
println!("part2: {}", part2(data));
}
fn part1(mut data: Vec<usize>) -> usize {
data[1] = 53;
data[2] = 35;
run_program(&mut data);
data[0]
}
fn part2(input: Vec<usize>) -> usize {
for noun in 0..=99 {
for verb in 0..=99 {
let mut inp = input.clone();
inp[1] = noun;
inp[2] = verb;
run_program(&mut inp);
if inp[0] == 19_690_720 {
return noun * 100 + verb;
}
}
}
panic!();
}
/// Runs an Intcode program in place: opcode 1 adds, opcode 2 multiplies, opcode 99 halts.
fn run_program(data: &mut [usize]) {
    let mut i = 0;
    loop {
        // Opcode 99 halts the program.
        if data[i] == 99 {
            break;
        }
        // Operands are addressed indirectly: the two cells after the opcode
        // hold the positions of the values to combine.
        let a = data[data[i + 1]];
        let b = data[data[i + 2]];
        // The third cell holds the destination position.
        let res = data[i + 3];
        data[res] = match data[i] {
            1 => a + b,
            2 => a * b,
            _ => panic!(),
        };
        // Every instruction is four cells wide.
        i += 4;
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn t1() {
let mut input = vec![1, 0, 0, 0, 99];
run_program(&mut input);
assert_eq!(input, vec![2, 0, 0, 0, 99]);
}
#[test]
fn test_input() {
let data: Vec<_> = include_str!("../../tests/2/input")
.trim()
.split(',')
.map(|s| s.parse::<usize>().unwrap())
.collect();
assert_eq!(part1(data.clone()), 19690720);
assert_eq!(part2(data), 5335);
}
}
| true
|
9b013a066402975277ab512b870cf4a44bfb6e2c
|
Rust
|
launch-cmd/raspi-boot
|
/router_script/src/opt.rs
|
UTF-8
| 855
| 2.53125
| 3
|
[] |
no_license
|
use structopt::StructOpt;
#[derive(StructOpt, Debug)]
#[structopt(name = "basic")]
pub struct Opt {
#[structopt(short = "v", long = "verbose")]
pub verbose: bool,
#[structopt(short = "w", long = "wifi-region")]
pub wifi_region: String,
#[structopt(short = "s", long = "ssid", default_value = "RASPI_AP")]
pub ssid: String,
#[structopt(short = "p", long = "password", default_value = "raspberry")]
pub password: String,
#[structopt(long = "ap-ip", default_value = "10.0.2.1")]
pub ap_ip: String,
#[structopt(long = "dhcp-start-ip", default_value = "10.0.2.2")]
pub dhcp_start_ip: String,
#[structopt(long = "dhcp-end-ip", default_value = "10.0.2.254")]
pub dhcp_end_ip: String,
#[structopt(long = "dhcp-subnet-mask", default_value = "255.255.255.0")]
pub dhcp_subnet_mask: String,
}
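// Illustrative sketch (not part of the original module): shows how these flags
// parse with structopt. `StructOpt::from_iter` parses from an explicit argument
// list instead of the real process arguments; only `--wifi-region` has no
// default and must be supplied.
#[cfg(test)]
mod opt_sketch {
    use super::Opt;
    use structopt::StructOpt;

    #[test]
    fn parses_with_defaults() {
        let opt = Opt::from_iter(&["router_script", "--wifi-region", "US"]);
        assert!(!opt.verbose);
        assert_eq!(opt.ssid, "RASPI_AP");
        assert_eq!(opt.password, "raspberry");
        assert_eq!(opt.ap_ip, "10.0.2.1");
    }
}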
| true
|
9e674b26107e08836565352326681c63ddf205a2
|
Rust
|
whalelephant/rust-multibase
|
/src/impls.rs
|
UTF-8
| 13,222
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
use crate::encoding;
use crate::error::Result;
pub(crate) trait BaseCodec {
/// Encode with the given byte slice.
fn encode<I: AsRef<[u8]>>(input: I) -> String;
/// Decode with the given string.
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>>;
}
/// Identity, 8-bit binary (encoder and decoder keep data unmodified).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Identity;
impl BaseCodec for Identity {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
String::from_utf8(input.as_ref().to_vec()).expect("input must be valid UTF-8 bytes")
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(input.as_ref().as_bytes().to_vec())
}
}
/// Base2 (alphabet: 01).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base2;
impl BaseCodec for Base2 {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE2.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE2.decode(input.as_ref().as_bytes())?)
}
}
/// Base8 (alphabet: 01234567).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base8;
impl BaseCodec for Base8 {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE8.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE8.decode(input.as_ref().as_bytes())?)
}
}
/// Base10 (alphabet: 0123456789).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base10;
impl BaseCodec for Base10 {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
base_x::encode(encoding::BASE10, input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(base_x::decode(encoding::BASE10, input.as_ref())?)
}
}
/// Base16 lower hexadecimal (alphabet: 0123456789abcdef).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base16Lower;
impl BaseCodec for Base16Lower {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE16_LOWER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE16_LOWER.decode(input.as_ref().as_bytes())?)
}
}
/// Base16 upper hexadecimal (alphabet: 0123456789ABCDEF).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base16Upper;
impl BaseCodec for Base16Upper {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE16_UPPER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE16_UPPER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32, rfc4648 no padding (alphabet: abcdefghijklmnopqrstuvwxyz234567).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32Lower;
impl BaseCodec for Base32Lower {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32_NOPAD_LOWER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32_NOPAD_LOWER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32, rfc4648 no padding (alphabet: ABCDEFGHIJKLMNOPQRSTUVWXYZ234567).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32Upper;
impl BaseCodec for Base32Upper {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32_NOPAD_UPPER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32_NOPAD_UPPER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32, rfc4648 with padding (alphabet: abcdefghijklmnopqrstuvwxyz234567).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32PadLower;
impl BaseCodec for Base32PadLower {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32_PAD_LOWER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32_PAD_LOWER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32, rfc4648 with padding (alphabet: ABCDEFGHIJKLMNOPQRSTUVWXYZ234567).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32PadUpper;
impl BaseCodec for Base32PadUpper {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32_PAD_UPPER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32_PAD_UPPER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32hex, rfc4648 no padding (alphabet: 0123456789abcdefghijklmnopqrstuv).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32HexLower;
impl BaseCodec for Base32HexLower {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32HEX_NOPAD_LOWER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32HEX_NOPAD_LOWER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32hex, rfc4648 no padding (alphabet: 0123456789ABCDEFGHIJKLMNOPQRSTUV).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32HexUpper;
impl BaseCodec for Base32HexUpper {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32HEX_NOPAD_UPPER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32HEX_NOPAD_UPPER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32hex, rfc4648 with padding (alphabet: 0123456789abcdefghijklmnopqrstuv).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32HexPadLower;
impl BaseCodec for Base32HexPadLower {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32HEX_PAD_LOWER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32HEX_PAD_LOWER.decode(input.as_ref().as_bytes())?)
}
}
/// Base32hex, rfc4648 with padding (alphabet: 0123456789ABCDEFGHIJKLMNOPQRSTUV).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32HexPadUpper;
impl BaseCodec for Base32HexPadUpper {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32HEX_PAD_UPPER.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32HEX_PAD_UPPER.decode(input.as_ref().as_bytes())?)
}
}
/// z-base-32 (used by Tahoe-LAFS) (alphabet: ybndrfg8ejkmcpqxot1uwisza345h769).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base32Z;
impl BaseCodec for Base32Z {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE32Z.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE32Z.decode(input.as_ref().as_bytes())?)
}
}
/// Base58 Flickr (alphabet: 123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base58Flickr;
impl BaseCodec for Base58Flickr {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
base_x::encode(encoding::BASE58_FLICKR, input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(base_x::decode(encoding::BASE58_FLICKR, input.as_ref())?)
}
}
/// Base58 bitcoin (alphabet: 123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base58Btc;
impl BaseCodec for Base58Btc {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
base_x::encode(encoding::BASE58_BITCOIN, input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(base_x::decode(encoding::BASE58_BITCOIN, input.as_ref())?)
}
}
/// Base64, rfc4648 no padding (alphabet: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base64;
impl BaseCodec for Base64 {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE64_NOPAD.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE64_NOPAD.decode(input.as_ref().as_bytes())?)
}
}
/// Base64, rfc4648 with padding (alphabet: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base64Pad;
impl BaseCodec for Base64Pad {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE64_PAD.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE64_PAD.decode(input.as_ref().as_bytes())?)
}
}
/// Base64 url, rfc4648 no padding (alphabet: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base64Url;
impl BaseCodec for Base64Url {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE64URL_NOPAD.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE64URL_NOPAD.decode(input.as_ref().as_bytes())?)
}
}
/// Base64 url, rfc4648 with padding (alphabet: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub(crate) struct Base64UrlPad;
impl BaseCodec for Base64UrlPad {
fn encode<I: AsRef<[u8]>>(input: I) -> String {
encoding::BASE64URL_PAD.encode(input.as_ref())
}
fn decode<I: AsRef<str>>(input: I) -> Result<Vec<u8>> {
Ok(encoding::BASE64URL_PAD.decode(input.as_ref().as_bytes())?)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_identity() {
assert_eq!(Identity::encode(b"foo"), "foo");
assert_eq!(Identity::decode("foo").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base2() {
assert_eq!(Base2::encode(b"foo"), "011001100110111101101111");
assert_eq!(
Base2::decode("011001100110111101101111").unwrap(),
b"foo".to_vec()
);
}
#[test]
fn test_base8() {
assert_eq!(Base8::encode(b"foo"), "31467557");
assert_eq!(Base8::decode("31467557").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base10() {
assert_eq!(Base10::encode(b"foo"), "6713199");
assert_eq!(Base10::decode("6713199").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base16() {
assert_eq!(Base16Lower::encode(b"foo"), "666f6f");
assert_eq!(Base16Lower::decode("666f6f").unwrap(), b"foo".to_vec());
assert_eq!(Base16Upper::encode(b"foo"), "666F6F");
assert_eq!(Base16Upper::decode("666F6F").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base32() {
assert_eq!(Base32Lower::encode(b"foo"), "mzxw6");
assert_eq!(Base32Lower::decode("mzxw6").unwrap(), b"foo".to_vec());
assert_eq!(Base32Upper::encode(b"foo"), "MZXW6");
assert_eq!(Base32Upper::decode("MZXW6").unwrap(), b"foo".to_vec());
assert_eq!(Base32HexLower::encode(b"foo"), "cpnmu");
assert_eq!(Base32HexLower::decode("cpnmu").unwrap(), b"foo".to_vec());
assert_eq!(Base32HexUpper::encode(b"foo"), "CPNMU");
assert_eq!(Base32HexUpper::decode("CPNMU").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base32_padding() {
assert_eq!(Base32PadLower::encode(b"foo"), "mzxw6===");
assert_eq!(Base32PadLower::decode("mzxw6===").unwrap(), b"foo".to_vec());
assert_eq!(Base32PadUpper::encode(b"foo"), "MZXW6===");
assert_eq!(Base32PadUpper::decode("MZXW6===").unwrap(), b"foo".to_vec());
assert_eq!(Base32HexPadLower::encode(b"foo"), "cpnmu===");
assert_eq!(
Base32HexPadLower::decode("cpnmu===").unwrap(),
b"foo".to_vec()
);
assert_eq!(Base32HexPadUpper::encode(b"foo"), "CPNMU===");
assert_eq!(
Base32HexPadUpper::decode("CPNMU===").unwrap(),
b"foo".to_vec()
);
}
#[test]
fn test_base32z() {
assert_eq!(Base32Z::encode(b"foo"), "c3zs6");
assert_eq!(Base32Z::decode("c3zs6").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base58() {
assert_eq!(Base58Flickr::encode(b"foo"), "ApAP");
assert_eq!(Base58Flickr::decode("ApAP").unwrap(), b"foo".to_vec());
assert_eq!(Base58Btc::encode(b"foo"), "bQbp");
assert_eq!(Base58Btc::decode("bQbp").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base64() {
assert_eq!(Base64::encode(b"foo"), "Zm9v");
assert_eq!(Base64::decode("Zm9v").unwrap(), b"foo".to_vec());
assert_eq!(Base64Url::encode(b"foo"), "Zm9v");
assert_eq!(Base64Url::decode("Zm9v").unwrap(), b"foo".to_vec());
}
#[test]
fn test_base64_padding() {
assert_eq!(Base64Pad::encode(b"foopadding"), "Zm9vcGFkZGluZw==");
assert_eq!(
Base64Pad::decode("Zm9vcGFkZGluZw==").unwrap(),
b"foopadding".to_vec()
);
assert_eq!(Base64UrlPad::encode(b"foopadding"), "Zm9vcGFkZGluZw==");
assert_eq!(
Base64UrlPad::decode("Zm9vcGFkZGluZw==").unwrap(),
b"foopadding".to_vec()
);
}
}
| true
|
7b45ae57ac2276c423b80470ad9208e1da1c12a2
|
Rust
|
dandels/dmodman
|
/src/ui/component/download_table.rs
|
UTF-8
| 2,774
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
use crate::api::Downloads;
use ratatui::layout::Constraint;
use ratatui::style::{Color, Style};
use ratatui::widgets::{Block, Borders, Cell, Row, Table, TableState};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio_stream::StreamExt;
pub struct DownloadTable<'a> {
pub state: TableState,
pub downloads: Downloads,
pub block: Block<'a>,
headers: Row<'a>,
pub highlight_style: Style,
pub widget: Table<'a>,
pub needs_redraw: AtomicBool,
redraw_terminal: Arc<AtomicBool>,
}
impl<'a> DownloadTable<'a> {
pub fn new(redraw_terminal: Arc<AtomicBool>, downloads: Downloads) -> Self {
let block = Block::default().borders(Borders::ALL).title("Downloads");
let headers = Row::new(
vec!["Filename", "Progress", "Status"]
.iter()
.map(|h| Cell::from(*h).style(Style::default().fg(Color::Red))),
);
downloads.has_changed.store(true, Ordering::Relaxed);
Self {
state: TableState::default(),
downloads,
block,
headers,
highlight_style: Style::default(),
widget: Table::new(vec![]),
needs_redraw: AtomicBool::new(false),
redraw_terminal,
}
}
// TODO would be good to not redraw the whole window, as it changes frequently
pub async fn refresh<'b>(&mut self)
where
'b: 'a,
{
if self.downloads.has_changed.swap(false, Ordering::Relaxed) {
let tasks = self.downloads.tasks.read().await;
let mut stream = tokio_stream::iter(tasks.values());
let mut rows: Vec<Row> = vec![];
while let Some(task) = stream.next().await {
rows.push(Row::new(vec![
task.dl_info.file_info.file_name.to_owned(),
task.dl_info.progress.to_string(),
task.dl_info.get_state().to_string(),
]))
}
self.widget = Table::new(rows)
.header(self.headers.to_owned())
.block(self.block.to_owned())
.widths(&[
Constraint::Percentage(60),
Constraint::Percentage(20),
Constraint::Percentage(20),
])
.highlight_style(self.highlight_style);
self.needs_redraw.store(false, Ordering::Relaxed);
self.redraw_terminal.store(true, Ordering::Relaxed);
} else if self.needs_redraw.swap(false, Ordering::Relaxed) {
self.widget = self.widget.clone().block(self.block.to_owned()).highlight_style(self.highlight_style);
self.redraw_terminal.store(true, Ordering::Relaxed);
}
}
}
| true
|
4cd1b9574d6ea93f6f7646d5395c3df7542de3a6
|
Rust
|
debris/matrix-rocketchat
|
/src/matrix-rocketchat/api/rocketchat/mod.rs
|
UTF-8
| 5,493
| 2.921875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
use std::collections::HashMap;
use iron::typemap::Key;
use reqwest::header::Headers;
use reqwest::Method;
use serde_json;
use slog::Logger;
use api::RestApi;
use errors::*;
use i18n::*;
/// Rocket.Chat REST API v1
pub mod v1;
/// A Rocket.Chat REST API endpoint.
pub trait Endpoint {
/// HTTP Method
fn method(&self) -> Method;
/// The URL of the endpoint
fn url(&self) -> String;
/// Payload that is sent to the server
fn payload(&self) -> Result<String>;
/// Headers that are sent to the server
fn headers(&self) -> Option<Headers>;
/// The query parameters that are used when sending the request
fn query_params(&self) -> HashMap<&'static str, &str> {
HashMap::new()
}
}
/// A Rocket.Chat channel
#[derive(Deserialize, Debug)]
pub struct Channel {
/// ID of the Rocket.Chat room
#[serde(rename = "_id")]
pub id: String,
/// Name of the Rocket.Chat room
pub name: Option<String>,
/// List of users in the room
pub usernames: Vec<String>,
}
/// A Rocket.Chat message
#[derive(Deserialize, Debug, Serialize)]
pub struct Message {
/// ID of the message
pub message_id: String,
/// Rocket.Chat token
pub token: Option<String>,
/// ID of the channel from which the message was sent
pub channel_id: String,
/// Name of the channel from which the message was sent
pub channel_name: Option<String>,
/// ID of the user who sent the message
pub user_id: String,
/// Name of the user who sent the message
pub user_name: String,
/// Message content
pub text: String,
}
/// A Rocket.Chat user
#[derive(Deserialize, Debug, Serialize)]
pub struct User {
/// ID of the Rocket.Chat user
#[serde(rename = "_id")]
pub id: String,
/// Name that is displayed in Rocket.Chat
pub username: String,
}
/// Rocket.Chat REST API
pub trait RocketchatApi {
/// List of channels on the Rocket.Chat server
fn channels_list(&self) -> Result<Vec<Channel>>;
    /// Get the logged-in user's username
fn current_username(&self) -> Result<String>;
/// List of direct messages the user is part of
fn direct_messages_list(&self) -> Result<Vec<Channel>>;
/// Login a user on the Rocket.Chat server
fn login(&self, username: &str, password: &str) -> Result<(String, String)>;
/// Post a chat message
fn post_chat_message(&self, text: &str, room_id: &str) -> Result<()>;
/// Get information like user_id, status, etc. about a user
fn users_info(&self, username: &str) -> Result<User>;
/// Set credentials that are used for all API calls that need authentication
fn with_credentials(self: Box<Self>, user_id: String, auth_token: String) -> Box<RocketchatApi>;
}
/// Response format when querying the Rocket.Chat info endpoint
#[derive(Deserialize, Serialize)]
pub struct GetInfoResponse {
version: String,
}
impl RocketchatApi {
/// Creates a new Rocket.Chat API depending on the version of the API.
    /// It returns a boxed `RocketchatApi` trait object, because a different API implementation is created for each version.
pub fn new(base_url: String, logger: Logger) -> Result<Box<RocketchatApi>> {
let url = base_url.clone() + "/api/info";
let params = HashMap::new();
        let (body, status_code) = match RestApi::call(Method::Get, &url, "", &params, None) {
Ok((body, status_code)) => (body, status_code),
Err(err) => {
debug!(logger, "{}", err);
bail_error!(
ErrorKind::RocketchatServerUnreachable(url.clone()),
t!(["errors", "rocketchat_server_unreachable"]).with_vars(vec![("rocketchat_url", url)])
);
}
};
if !status_code.is_success() {
bail_error!(
ErrorKind::NoRocketchatServer(url.clone()),
t!(["errors", "no_rocketchat_server"]).with_vars(vec![("rocketchat_url", url.clone())])
);
}
let rocketchat_info: GetInfoResponse =
match serde_json::from_str(&body).chain_err(|| ErrorKind::NoRocketchatServer(url.clone())) {
Ok(rocketchat_info) => rocketchat_info,
Err(err) => {
bail_error!(err, t!(["errors", "no_rocketchat_server"]).with_vars(vec![("rocketchat_url", url)]));
}
};
RocketchatApi::get_max_supported_version_api(rocketchat_info.version, base_url, logger)
}
fn get_max_supported_version_api(version: String, base_url: String, logger: Logger) -> Result<Box<RocketchatApi>> {
let version_string = version.clone();
let mut versions = version_string.split('.').into_iter();
let major: i32 = versions.next().unwrap_or("0").parse().unwrap_or(0);
let minor: i32 = versions.next().unwrap_or("0").parse().unwrap_or(0);
if major == 0 && minor >= 49 {
let rocketchat_api = v1::RocketchatApi::new(base_url, logger);
return Ok(Box::new(rocketchat_api));
}
let min_version = "0.49".to_string();
Err(Error {
error_chain: ErrorKind::UnsupportedRocketchatApiVersion(min_version.clone(), version.clone()).into(),
user_message: Some(t!(["errors", "unsupported_rocketchat_api_version"]).with_vars(vec![
("min_version", min_version),
("version", version),
])),
})
}
}
impl Key for Message {
type Value = Message;
}
| true
|
e27acd63f725f9bf6b001d4c72823f160bc83193
|
Rust
|
zsparks/rust-stuff
|
/lam.rs
|
UTF-8
| 3,006
| 3.453125
| 3
|
[] |
no_license
|
extern mod std;
use std::list::*;
use core::option::*;
use core::to_str::*;
enum Tp {
Unit
, Arr (@Tp, @Tp)
}
impl Tp: Eq {
pure fn eq (other: &Tp) -> bool {
match (self, *other) {
(Unit, Unit) => true
            , (Arr (@t1a, t2a), Arr (@t1b, t2b)) => t1a.eq (&t1b) && t2a.eq (t2b)
, _ => false
}
}
pure fn ne (other: &Tp) -> bool {
! (self.eq (other))
}
}
impl Tp: ToStr {
pure fn to_str() -> ~str {
match self {
Unit => ~"Unit"
, Arr (@t1, @t2) =>
str::append (t1.to_str(), str::append (~" -> ", t2.to_str()))
}
}
}
impl Exp: ToStr {
pure fn to_str() -> ~str {
match self {
Triv => ~"Triv"
, App (@e1, @e2) =>
str::concat ([e1.to_str(), ~" ", e2.to_str()])
, Lam (@t, @body) =>
str::concat ([~"Lambda ", t.to_str(), ~" (", body.to_str(), ~")"])
, Var (i) => i.to_str()
}
}
}
enum Exp {
Triv
, Lam (@Tp, @Exp)
, App (@Exp, @Exp)
, Var (int)
}
pure fn nth<T: Copy> (l: List<T>, n: int) -> Option<T> {
match l {
Nil => None
, Cons (t, @l) => if (n == 0) { Some (t) } else { nth (l, n - 1) }
}
}
pure fn typecheck (ctx: List<@Tp>, exp: @Exp) -> Option<@Tp> {
match *exp {
Triv => Some (@Unit)
, Lam (t, e) =>
do chain (typecheck (Cons (t, @ctx), e))
|tout| { Some (@Arr (t, tout)) }
, App (e1, e2) =>
match typecheck (ctx, e1) {
Some (@Arr (@t, tout)) =>
do chain (typecheck (ctx, e2))
|t2| {
if (t.eq (t2)) {
Some (tout)
}
else {
None
}
}
, _ => None
}
, Var (i) => nth (ctx, i)
}
}
pure fn value (e: @Exp) -> bool {
match *e {
Triv => true
, Lam (*) => true
, _ => false
}
}
pure fn subst_i (e: @Exp, ehole: @Exp, i: int) -> @Exp {
match *ehole {
Var (j) if j == i => e
, Var (j) => @ Var (j - 1)
, App (e1, e2) => @ App (subst_i (e, e1, i), subst_i (e, e2, i))
, Lam (t, body) => @ Lam (t, subst_i (e, body, i + 1))
, Triv => ehole
}
}
pure fn subst (e: @Exp, ehole: @Exp) -> @Exp {
subst_i (e, ehole, 0)
}
pure fn step (e: @Exp) -> Option<@Exp> {
match *e {
App (e1 @ @Lam (_, body), e2) =>
if value (e2) {
Some (subst (e2, body))
}
else {
do chain (step (e2))
|e2out| { Some (@App (e1, e2out)) }
}
, _ => None
}
}
pure fn eval (e: @Exp) -> Option<@Exp> {
if value (e) {
Some (e)
}
else {
chain (step (e), eval)
}
}
fn typecheck_and_print (e: @Exp) {
do iter (@ typecheck (Nil, e))
|out| { io::println ((**out).to_str()); };
}
fn eval_and_print (e: @Exp) {
match (eval (e)) {
None => io::println ("Failure.")
, Some (v) => io::println (v.to_str())
}
}
fn main () {
let id_exp : @Exp = @Lam (@Unit, @Var (0));
let id_app_triv : @Exp = @App (id_exp, @Triv);
typecheck_and_print (id_exp);
typecheck_and_print (id_app_triv);
eval_and_print (id_app_triv);
}
| true
|
e127c9ae4fa4edbed749bef609fe674dd4cd9116
|
Rust
|
sloshwoven/plotters
|
/src/drawing/backend_impl/bitmap.rs
|
UTF-8
| 3,330
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
use crate::drawing::backend::{BackendCoord, DrawingBackend, DrawingErrorKind};
use crate::style::{Color, RGBAColor};
use image::{ImageError, Rgb, RgbImage};
use std::path::Path;
enum Target<'a> {
File(&'a Path),
Buffer(&'a mut Vec<u8>),
}
/// The backend that draws to a bitmap
pub struct BitMapBackend<'a> {
/// The path to the image
target: Target<'a>,
/// The image object
img: RgbImage,
/// Flag indicates if the bitmap has been saved
saved: bool,
}
impl<'a> BitMapBackend<'a> {
/// Create a new bitmap backend
pub fn new<T: AsRef<Path> + ?Sized>(path: &'a T, dimension: (u32, u32)) -> Self {
Self {
target: Target::File(path.as_ref()),
img: RgbImage::new(dimension.0, dimension.1),
saved: false,
}
}
/// Create a new bitmap backend which only lives in-memory
pub fn with_buffer(buf: &'a mut Vec<u8>, dimension: (u32, u32)) -> Self {
Self {
target: Target::Buffer(buf),
img: RgbImage::new(dimension.0, dimension.1),
saved: false,
}
}
}
impl<'a> DrawingBackend for BitMapBackend<'a> {
type ErrorType = ImageError;
fn get_size(&self) -> (u32, u32) {
(self.img.width(), self.img.height())
}
fn ensure_prepared(&mut self) -> Result<(), DrawingErrorKind<ImageError>> {
Ok(())
}
fn present(&mut self) -> Result<(), DrawingErrorKind<ImageError>> {
match &mut self.target {
Target::File(path) => {
self.img
.save(&path)
.map_err(|x| DrawingErrorKind::DrawingError(ImageError::IoError(x)))?;
self.saved = true;
Ok(())
}
Target::Buffer(target) => {
let mut actual_img = RgbImage::new(1, 1);
std::mem::swap(&mut actual_img, &mut self.img);
target.clear();
target.append(&mut actual_img.into_raw());
Ok(())
}
}
}
fn draw_pixel(
&mut self,
point: BackendCoord,
color: &RGBAColor,
) -> Result<(), DrawingErrorKind<ImageError>> {
if point.0 as u32 >= self.img.width()
|| point.0 < 0
|| point.1 as u32 >= self.img.height()
|| point.1 < 0
{
return Ok(());
}
let alpha = color.alpha();
let rgb = color.rgb();
if alpha >= 1.0 {
self.img.put_pixel(
point.0 as u32,
point.1 as u32,
Rgb {
data: [rgb.0, rgb.1, rgb.2],
},
);
} else {
let pixel = self.img.get_pixel_mut(point.0 as u32, point.1 as u32);
let new_color = [rgb.0, rgb.1, rgb.2];
pixel
.data
.iter_mut()
.zip(&new_color)
.for_each(|(old, new)| {
*old = (f64::from(*old) * (1.0 - alpha) + f64::from(*new) * alpha).min(255.0)
as u8;
});
}
Ok(())
}
}
impl Drop for BitMapBackend<'_> {
fn drop(&mut self) {
if !self.saved {
self.present().expect("Unable to save the bitmap");
}
}
}
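// Illustrative sketch (not part of the original backend): exercises the
// in-memory variant. Dropping an unsaved backend triggers `present`, which
// moves the raw RGB bytes (width * height * 3 of them) into the caller's
// buffer.
#[cfg(test)]
mod bitmap_buffer_sketch {
    use super::BitMapBackend;

    #[test]
    fn buffer_receives_raw_rgb_bytes() {
        let mut buf = Vec::new();
        {
            let _backend = BitMapBackend::with_buffer(&mut buf, (4, 4));
            // Dropped here without an explicit `present`; `Drop` flushes it.
        }
        assert_eq!(buf.len(), 4 * 4 * 3);
    }
}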
| true
|
22e2bd0ea0a63a7f4f5f732032e09a02ea128b90
|
Rust
|
bertho-zero/solang
|
/tests/solana_tests/expressions.rs
|
UTF-8
| 3,973
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
use crate::{build_solidity, first_error, no_errors, parse_and_resolve};
use ethabi::Token;
use solang::Target;
#[test]
fn interfaceid() {
let ns = parse_and_resolve(
r#"
contract foo {
function get() public returns (bytes4) {
return type(foo).interfaceId;
}
}"#,
Target::Solana,
);
assert_eq!(
first_error(ns.diagnostics),
"type(…).interfaceId is permitted on interface, not contract foo"
);
let mut vm = build_solidity(
r#"
contract foo {
function get() public returns (bytes4) {
return type(I).interfaceId;
}
}
interface I {
function bar(int) external;
function baz(bytes) external returns (int);
}"#,
);
vm.constructor("foo", &[], 0);
let returns = vm.function("get", &[], &[], 0);
assert_eq!(
returns,
vec![Token::FixedBytes(0xc78d9f3au32.to_be_bytes().to_vec())]
);
}
#[test]
fn const_in_type() {
let ns = parse_and_resolve(
r#"
contract x {
uint public constant Y = 24;
constructor(bytes32[Y] memory foo) {}
}"#,
Target::Solana,
);
no_errors(ns.diagnostics);
}
#[test]
fn bytes32_0() {
let ns = parse_and_resolve(
r#"
contract x {
function b32() public pure returns (bytes32 r) {
r = bytes32(0);
}
function b4() public pure returns (bytes4 r) {
r = bytes4(0xcafedead);
}
function b3() public pure returns (bytes3 r) {
r = bytes3(0x012233);
}
}"#,
Target::Solana,
);
no_errors(ns.diagnostics);
let ns = parse_and_resolve(
r#"
contract foo {
function b32() public pure returns (bytes32 r) {
r = bytes32(0xffee);
}
}"#,
Target::Solana,
);
assert_eq!(
first_error(ns.diagnostics),
"number of 2 bytes cannot be converted to type ‘bytes32’"
);
let ns = parse_and_resolve(
r#"
contract foo {
function b32() public pure returns (bytes32 r) {
r = bytes32(-1);
}
}"#,
Target::Solana,
);
assert_eq!(
first_error(ns.diagnostics),
"negative number cannot be converted to type ‘bytes32’"
);
}
#[test]
fn contract_no_init() {
let ns = parse_and_resolve(
r#"
contract other {
int public a;
}
contract testing {
function test(int x) public returns (int) {
other o;
do {
x--;
o = new other();
}while(x > 0);
return o.a();
}
}"#,
Target::Solana,
);
no_errors(ns.diagnostics);
}
#[test]
fn selector_in_free_function() {
let ns = parse_and_resolve(
r#"
interface I {
function X(bytes) external;
}
function x() returns (bytes4) {
return I.X.selector;
}
contract foo {}
"#,
Target::Solana,
);
no_errors(ns.diagnostics);
let ns = parse_and_resolve(
r#"
interface I {
function X(bytes) external;
}
contract X {
function x() public returns (bytes4) {
return I.X.selector;
}
}"#,
Target::Solana,
);
no_errors(ns.diagnostics);
let ns = parse_and_resolve(
r#"
contract I {
function X() external {}
}
contract foo {
function f(I t) public returns (bytes4) {
return t.X.selector;
}
}
"#,
Target::Solana,
);
no_errors(ns.diagnostics);
}
| true
|
7b057eb00ebbfc5c1ec66ccfa5a6866384dcafa7
|
Rust
|
DrSloth/update_channel
|
/src/receiver.rs
|
UTF-8
| 6,455
| 3.421875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use std::{
cell::UnsafeCell,
sync::{Arc, RwLock, RwLockReadGuard},
};
/// The receiving half of an update channel
///
/// This struct holds an internal buffer in which data is held.
/// The buffer can then be updated with recv_update, take_update usw.
#[derive(Debug)]
pub struct Receiver<T> {
pub(crate) cell: UnsafeCell<T>,
pub(crate) shared: Arc<RwLock<Option<T>>>,
}
impl<T> Receiver<T> {
    /// Takes the new updated value and makes it unavailable for all other receivers.
    /// Returns the previous value inside the buffer if the value has not been taken by another receiver,
    /// otherwise returns `Ok(None)`.
pub fn take_update(&mut self) -> Result<Option<T>, ReceiveError> {
unsafe {
self.take_update_unsafe()
}
}
/// Same as [`Receiver::take_update`](struct.Receiver.html#method.take_update) but without borrowing mutably.
    /// This might open new possibilities, but it might result in undefined behavior if there are immutable borrows
/// created with [`Receiver::borrow`](struct.Receiver.html#method.borrow)
pub unsafe fn take_update_unsafe(&self) -> Result<Option<T>, ReceiveError> {
let mut lock = self.shared.write().map_err(|_| ReceiveError)?;
if let Some(s) = lock.take() {
let old = std::mem::replace(&mut *self.cell.get(), s);
Ok(Some(old))
} else {
Ok(None)
}
}
/// Get the value saved in the internal buffer without updating as borrow
pub fn borrow(&self) -> &T {
unsafe { &*self.cell.get() }
}
/// Get a mutable reference to the value in the buffer
pub fn borrow_mut(&mut self) -> &mut T {
unsafe { &mut *self.cell.get() }
}
/// Unwrap the value contained in the buffer of this receiver
pub fn into_inner(self) -> T {
self.cell.into_inner()
}
/// Get the latest value (not the value in the buffer) and return it while holding a read lock to it.
/// It is not recommended to hold on to this lock for long.
pub fn borrow_locked<'a>(&'a self) -> Result<RwLockReadGuard<'a, Option<T>>, ReceiveError> {
self.shared.read().map_err(|_| ReceiveError)
}
/// Checks if at least one updater exists
pub fn has_updater(&self) -> bool {
Arc::weak_count(&self.shared) != 0
}
}
impl<T> Receiver<T>
where
T: Clone,
{
    /// Clone the latest updated value into the internal buffer.
/// Returns the previous value inside the buffer if the value has not been taken by another receiver
pub fn recv_update(&mut self) -> Result<Option<T>, ReceiveError> {
unsafe {
self.recv_update_unsafe()
}
}
/// Same as [`Receiver::recv_update`](struct.Receiver.html#method.recv_update) but without borrowing mutably.
    /// This might open new possibilities, but it might result in undefined behavior if there are immutable borrows
/// created with [`Receiver::borrow`](struct.Receiver.html#method.borrow)
pub unsafe fn recv_update_unsafe(&self) -> Result<Option<T>, ReceiveError> {
let lock = self.shared.read().map_err(|_| ReceiveError)?;
if let Some(s) = &*lock {
let old = std::mem::replace(&mut *self.cell.get(), s.clone());
Ok(Some(old))
} else {
Ok(None)
}
}
/// Get the latest value (not the value in the buffer) cloned
pub fn get_cloned(&self) -> Result<Option<T>, ReceiveError> {
self.shared
.read()
.map_err(|_| ReceiveError)
.map(|v| v.clone())
}
}
impl<T> Receiver<T>
where
T: Clone + PartialEq,
{
/// Updates the internal buffer by cloning the updated value if it is different from the current held value
pub fn recv_update_checked(&mut self) -> Result<Option<T>, ReceiveError> {
unsafe { self.recv_update_checked_unsafe() }
}
/// Same as [`Receiver::recv_update_checked`](struct.Receiver.html#method.recv_update_checked) but without borrowing mutably.
    /// This might open new possibilities, but it might result in undefined behavior if there are immutable borrows
/// created with [`Receiver::borrow`](struct.Receiver.html#method.borrow)
pub unsafe fn recv_update_checked_unsafe(&self) -> Result<Option<T>, ReceiveError> {
let lock = self.shared.read().map_err(|_| ReceiveError)?;
if let Some(value) = &*lock {
let vptr = self.cell.get();
if value != &*vptr {
let old = std::mem::replace(&mut *vptr, value.clone() );
Ok(Some(old))
} else {
Ok(None)
}
} else {
Ok(None)
}
}
/// Updates the internal buffer by taking the updated value if it is different from the current held value
pub fn take_update_checked(&mut self) -> Result<Option<T>, ReceiveError> {
unsafe { self.take_update_checked_unsafe() }
}
/// Same as [`Receiver::recv_update_checked`](struct.Receiver.html#method.recv_update_checked) but without borrowing mutably.
    /// This might open new possibilities, but it might result in undefined behavior if there are immutable borrows
/// created with [`Receiver::borrow`](struct.Receiver.html#method.borrow)
pub unsafe fn take_update_checked_unsafe(&self) -> Result<Option<T>, ReceiveError> {
let mut lock = self.shared.write().map_err(|_| ReceiveError)?;
if let Some(value) = lock.take() {
let vptr = self.cell.get();
if value != *vptr {
                let old = std::mem::replace(&mut *vptr, value);
Ok(Some(old))
} else {
Ok(None)
}
} else {
Ok(None)
}
}
}
impl<T> Clone for Receiver<T>
where
T: Clone,
{
fn clone(&self) -> Self {
Self {
shared: Arc::clone(&self.shared),
cell: UnsafeCell::new(unsafe { (&*self.cell.get()).clone() }),
}
}
}
unsafe impl<T> Send for Receiver<T> where T: Clone {}
/// An error that might occur while receiving a value
#[derive(Debug, Clone)]
pub struct ReceiveError;
impl std::fmt::Display for ReceiveError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
use std::error::Error;
impl Error for ReceiveError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
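// Illustration: a minimal usage sketch for the consuming side. How a `Receiver` is
// constructed is not shown in this file, so the sketch only combines the methods
// defined above: poll for an update, then read whatever the buffer holds.
#[allow(dead_code)]
fn latest_or_buffered<T: Clone>(receiver: &mut Receiver<T>) -> Result<T, ReceiveError> {
    // `recv_update` clones a pending update (if any) into the internal buffer;
    // `borrow` then returns a reference to the buffered value.
    receiver.recv_update()?;
    Ok(receiver.borrow().clone())
}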
| true
|
85619c8cb307ffdcdb7ee2db9c827c7196dec66b
|
Rust
|
QuickLimeCA/x509
|
/src/x509.rs
|
UTF-8
| 7,827
| 3.09375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
pub mod x509 {
use simple_asn1::{ToASN1, FromASN1, ASN1Block, ASN1Class, ASN1DecodeErr, ASN1EncodeErr};
use num::bigint::BigInt;
use num::ToPrimitive;
#[derive(Debug, PartialEq)]
pub enum Version {
V1,
V2,
V3
}
impl ToASN1 for Version {
type Error = ASN1EncodeErr;
fn to_asn1_class(&self, _c: ASN1Class) -> Result<Vec<ASN1Block>, Self::Error> {
let val = match self {
&Version::V1 => 0,
&Version::V2 => 1,
&Version::V3 => 2,
};
Result::Ok(vec![ASN1Block::Integer(ASN1Class::Universal, 0, BigInt::from(val))])
}
}
impl FromASN1 for Version {
type Error = ASN1DecodeErr;
fn from_asn1(v: &[ASN1Block]) -> Result<(Self, &[ASN1Block]), Self::Error> {
let (head, tail) = v.split_at(1);
match head[0] {
ASN1Block::Integer(class, _, ref val) => {
match class {
ASN1Class::Universal => {
if val < &BigInt::from(0) || val > &BigInt::from(2) {
return Err(ASN1DecodeErr::UTF8DecodeFailure);
}
else if val == &BigInt::from(0) {
return Ok((Version::V1, &tail));
}
else if val == &BigInt::from(1) {
return Ok((Version::V2, &tail));
}
Ok((Version::V3, &tail))
},
_ => Err(ASN1DecodeErr::UTF8DecodeFailure)
}
},
_ => Err(ASN1DecodeErr::UTF8DecodeFailure)
}
}
}
#[derive(Debug, PartialEq)]
pub struct CertificateSerialNumber(pub i64);
impl ToASN1 for CertificateSerialNumber {
type Error = ASN1EncodeErr;
fn to_asn1_class(&self, _c: ASN1Class) -> Result<Vec<ASN1Block>, Self::Error> {
Result::Ok(vec![ASN1Block::Integer(ASN1Class::Universal, 0, BigInt::from(self.0))])
}
}
impl FromASN1 for CertificateSerialNumber {
type Error = ASN1DecodeErr;
fn from_asn1(v: &[ASN1Block]) -> Result<(Self, &[ASN1Block]), Self::Error> {
let (head, tail) = v.split_at(1);
match head[0] {
ASN1Block::Integer(class, _, ref val) => {
match class {
ASN1Class::Universal => Ok((CertificateSerialNumber(BigInt::to_i64(val).unwrap()), &tail)),
_ => Err(ASN1DecodeErr::UTF8DecodeFailure)
}
},
_ => Err(ASN1DecodeErr::UTF8DecodeFailure)
}
}
}
}
#[cfg(test)]
mod version_tests {
use simple_asn1::{der_decode, der_encode, from_der, FromASN1, ASN1Block, ASN1DecodeErr};
use super::x509::Version;
#[test]
fn version_encodes_v1_correctly() {
let expected = vec![0x02, 0x01, 0x00];
let actual = der_encode(&Version::V1).unwrap();
assert_eq!(expected, actual);
}
#[test]
fn version_encodes_v2_correctly() {
let expected = vec![0x02, 0x01, 0x01];
let actual = der_encode(&Version::V2).unwrap();
assert_eq!(expected, actual);
}
#[test]
fn version_encodes_v3_correctly() {
let expected = vec![0x02, 0x01, 0x02];
let actual = der_encode(&Version::V3).unwrap();
assert_eq!(expected, actual);
}
#[test]
fn version_decodes_v1_correctly() {
let expected = Version::V1;
let actual = der_decode::<Version>(&vec![0x02, 0x01, 0x00]).unwrap();
assert_eq!(expected, actual);
}
#[test]
fn version_decodes_v2_correctly() {
let expected = Version::V2;
let actual = der_decode::<Version>(&vec![0x02, 0x01, 0x01]).unwrap();
assert_eq!(expected, actual);
}
#[test]
fn version_decodes_v3_correctly() {
let expected = Version::V3;
let actual = der_decode::<Version>(&vec![0x02, 0x01, 0x02]).unwrap();
assert_eq!(expected, actual);
}
#[test]
fn version_should_err_when_unsupported_version_supplied() {
let error = der_decode::<Version>(&vec![0x02, 0x01, 0x03]).unwrap_err();
assert_eq!(error, ASN1DecodeErr::UTF8DecodeFailure)
}
#[test]
fn version_should_not_break_decoding_subsequent_blocks() {
// ASN.1 Sequence of 3 Versions: v1, v2 & v3
let test_data = vec![0x30, 0x09, 0x02, 0x01, 0x00, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02];
let expected = vec![Version::V1, Version::V2, Version::V3];
let seq = &from_der(&test_data).unwrap()[0];
match seq {
&ASN1Block::Sequence(_, _, ref blocks) => {
let (first_actual, first_tail) = Version::from_asn1(&blocks).unwrap();
let (second_actual, second_tail) = Version::from_asn1(&first_tail).unwrap();
let (third_actual, _) = Version::from_asn1(&second_tail).unwrap();
assert_eq!(expected[0], first_actual);
assert_eq!(expected[1], second_actual);
assert_eq!(expected[2], third_actual);
},
_ => panic!("Not a sequence")
}
}
}
#[cfg(test)]
mod certificate_serial_numbers_tests {
use simple_asn1::{der_decode, der_encode};
use super::x509::CertificateSerialNumber;
macro_rules! decoding_test {
($name:ident, $input:expr, $expected:expr) => {
#[test]
fn $name() {
let actual = der_decode::<CertificateSerialNumber>($input).unwrap();
let expected = CertificateSerialNumber($expected);
assert_eq!(expected, actual);
}
}
}
macro_rules! encoding_test {
($name:ident, $input:expr, $expected:expr) => {
#[test]
fn $name() {
let actual = der_encode(&CertificateSerialNumber($input)).unwrap();
let expected = $expected;
assert_eq!(expected, actual);
}
}
}
decoding_test!(certificate_serial_number_should_decode_0, &vec![0x02, 0x01, 0x00], 0);
decoding_test!(certificate_serial_number_should_decode_1, &vec![0x02, 0x01, 0x01], 1);
decoding_test!(certificate_serial_number_should_decode_negative_1, &vec![0x02, 0x01, 0xFF], -1);
decoding_test!(certificate_serial_number_should_decode_negative_42, &vec![0x02, 0x01, 0xD6], -42);
decoding_test!(certificate_serial_number_should_decode_42, &vec![0x02, 0x01, 0x2A], 42);
decoding_test!(certificate_serial_number_should_decode_i64_max, &vec![0x02, 0x08, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF], 9223372036854775807);
decoding_test!(certificate_serial_number_should_decode_i64_min, &vec![0x02, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], -9223372036854775808);
encoding_test!(certificate_serial_number_should_encode_0, 0, vec![0x02, 0x01, 0x00]);
encoding_test!(certificate_serial_number_should_encode_1, 1, vec![0x02, 0x01, 0x01]);
encoding_test!(certificate_serial_number_should_encode_negative_1, -1, vec![0x02, 0x01, 0xFF]);
encoding_test!(certificate_serial_number_should_encode_negative_42, -42, vec![0x02, 0x01, 0xD6]);
encoding_test!(certificate_serial_number_should_encode_42, 42, vec![0x02, 0x01, 0x2A]);
encoding_test!(certificate_serial_number_should_encode_i64_max, 9223372036854775807, vec![0x02, 0x08, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]);
encoding_test!(certificate_serial_number_should_encode_i64_min, -9223372036854775808, vec![0x02, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
}
| true
|
7fae21c32a20d8b95dbab66148d435c884cf1432
|
Rust
|
kunicmarko20/lsbasi-rs
|
/part6/src/lexer.rs
|
UTF-8
| 2,320
| 3.765625
| 4
|
[] |
no_license
|
use super::token::Token;
pub struct Lexer {
text: String,
position: usize,
current_character: Option<char>,
}
impl Lexer {
pub fn new<T: Into<String>>(text: T) -> Self {
let text_as_string = text.into();
Lexer{
current_character: (&text_as_string).chars().next(),
text: text_as_string,
position: 0,
}
}
pub fn advance(&mut self) {
self.position += 1;
self.current_character = self.text.chars().nth(self.position);
}
fn skip_whitespace(&mut self) {
while let Some(value) = self.current_character {
if value.is_whitespace() {
self.advance();
continue;
}
return;
}
}
fn integer(&mut self) -> u32 {
let mut result = String::new();
while let Some(character) = self.current_character {
if character.is_digit(10) {
result += &character.to_string();
self.advance();
continue;
}
break;
}
result.parse::<u32>().unwrap()
}
pub fn get_next_token(&mut self) -> Token {
while let Some(character) = self.current_character {
if character.is_whitespace() {
self.skip_whitespace();
continue;
}
if character.is_digit(10) {
return Token::Integer(self.integer());
}
match character {
'+' => {
self.advance();
return Token::Plus;
},
'-' => {
self.advance();
return Token::Minus;
},
'*' => {
self.advance();
return Token::Mul;
},
'/' => {
self.advance();
return Token::Div;
},
'(' => {
self.advance();
return Token::LParen;
},
')' => {
self.advance();
return Token::RParen;
},
_ => ()
}
}
return Token::End;
}
}
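// Illustration: a small usage sketch that relies only on the API above. Whether `Token`
// implements `PartialEq`/`Debug` is not shown here, so variants are matched manually.
#[cfg(test)]
mod lexer_usage_sketch {
    use super::*;
    #[test]
    fn tokenizes_a_small_expression() {
        let mut lexer = Lexer::new("3 + (14 * 2)");
        let mut integers = 0;
        let mut other_tokens = 0;
        loop {
            match lexer.get_next_token() {
                Token::End => break,
                Token::Integer(_) => integers += 1,
                _ => other_tokens += 1,
            }
        }
        // 3, 14 and 2 are integers; '+', '*', '(' and ')' make up the rest.
        assert_eq!((integers, other_tokens), (3, 4));
    }
}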
| true
|
15878887109cfb412f3173422dfda39cec98bfd5
|
Rust
|
la10736/rstest_playground
|
/hello_proc_macro/src/lib.rs
|
UTF-8
| 1,187
| 2.609375
| 3
|
[] |
no_license
|
extern crate proc_macro;
use quote::quote;
#[proc_macro_attribute]
pub fn hello(_attrs: proc_macro::TokenStream, body: proc_macro::TokenStream) -> proc_macro::TokenStream{
// Parse tokens
let ast = syn::parse(body).unwrap();
// Build impl
let gen = impl_hello_world(ast);
println!(r#"
--------------------- RESULT CODE ---------------------
{}
-------------------------------------------------------"#, gen);
// Return the generated impl
gen.into()
}
fn impl_hello_world(item: syn::ItemFn) -> proc_macro2::TokenStream {
let vis = &item.vis;
let attrs = &item.attrs;
let sig = &item.sig;
let block = &item.block;
let args = item.sig.inputs.iter()
.filter_map(maybe_ident);
quote! {
#(#attrs)*
#vis #sig {
#(
println!("{} = {}", stringify!(#args), #args);
)*
#block
}
}
}
fn maybe_ident(arg: &syn::FnArg) -> Option<&syn::Ident> {
match arg {
syn::FnArg::Typed(syn::PatType { pat, .. }) => match pat.as_ref() {
syn::Pat::Ident(ident) => Some(&ident.ident),
_ => None,
},
_ => None,
}
}
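// Illustration of what the attribute produces (the crate name `hello_proc_macro` is taken
// from the file path and is an assumption):
//     use hello_proc_macro::hello;
//     #[hello]
//     fn add(a: i32, b: i32) -> i32 { a + b }
// roughly expands to
//     fn add(a: i32, b: i32) -> i32 {
//         println!("{} = {}", stringify!(a), a);
//         println!("{} = {}", stringify!(b), b);
//         { a + b }
//     }
// so each argument is printed by name before the original body runs.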
| true
|
b804b0f21964bb6c25cca3678f3010fae466fd08
|
Rust
|
lemonrock/file-descriptors
|
/src/file_descriptor_information/ExtendFromSlice.rs
|
UTF-8
| 3,514
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
// This file is part of file-descriptors. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. No part of file-descriptors, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of file-descriptors. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT.
pub(crate) trait ExtendFromSlice: AsRef<[u8]>
{
#[inline(always)]
fn parse_header_line<R: BufRead + ?Sized, Value, Error>(&mut self, buf_read: &mut R, field_name_with_colon_and_tab: &[u8], parser: impl FnOnce(&str) -> Result<Value, Error>) -> io::Result<Value>
{
self.read_until_line_feed(buf_read)?;
let token: &[u8] = field_name_with_colon_and_tab;
if unlikely!(self.length() < token.len())
{
return Err(invalid_data())
}
if unlikely!(&(self.as_ref())[0 .. token.len()] != token)
{
return Err(invalid_data())
}
let value =
{
let raw_value = &(self.as_ref())[token.len()..];
let raw_value_utf8 = from_utf8(raw_value).map_err(|_utf8_error| invalid_data())?;
parser(raw_value_utf8).map_err(|_parse_error| invalid_data())?
};
self.empty();
Ok(value)
}
#[inline(always)]
fn read_until_line_feed<R: BufRead + ?Sized>(&mut self, buf_read: &mut R) -> io::Result<usize>
{
self.read_until_delimiter_populating_buffer_with_bytes_read_excluding_delimiter(buf_read, b'\n')
}
fn read_until_delimiter_populating_buffer_with_bytes_read_excluding_delimiter<R: BufRead + ?Sized>(&mut self, buf_read: &mut R, delimiter: u8) -> io::Result<usize>;
fn extend_from_slice(&mut self, slice: &[u8]) -> io::Result<()>;
fn length(&self) -> usize;
fn empty(&mut self);
}
impl<A: Array<Item=u8>> ExtendFromSlice for ArrayVec<A>
{
#[inline(always)]
fn read_until_delimiter_populating_buffer_with_bytes_read_excluding_delimiter<R: BufRead + ?Sized>(&mut self, buf_read: &mut R, delimiter: u8) -> io::Result<usize>
{
let mut total_bytes_read = 0;
loop
{
let (used, is_delimited) =
{
let available_slice = match buf_read.fill_buf()
{
Ok(available_slice) => available_slice,
Err(error) => if error.kind() == ErrorKind::Interrupted
{
continue
}
else
{
return Err(error)
},
};
let delimiter_index = memchr(delimiter, available_slice);
let index = delimiter_index.unwrap_or(available_slice.len());
self.extend_from_slice(&available_slice[0 .. index])?;
(index, delimiter_index.is_some())
};
if used != 0
{
buf_read.consume(used);
total_bytes_read += used;
}
if is_delimited || used == 0
{
return Ok(total_bytes_read)
}
}
}
#[inline(always)]
fn extend_from_slice(&mut self, slice: &[u8]) -> io::Result<()>
{
let original_length = self.len();
let slice_length = slice.len();
let new_length = original_length + slice_length;
if new_length > self.capacity()
{
return Err(invalid_data())
}
let pointer = self.as_mut_slice().as_mut_ptr();
unsafe
{
pointer.add(original_length).copy_from_nonoverlapping(slice.as_ptr(), slice_length);
self.set_len(new_length)
}
Ok(())
}
#[inline(always)]
fn length(&self) -> usize
{
self.len()
}
#[inline(always)]
fn empty(&mut self)
{
self.clear()
}
}
| true
|
9218989a17725cac5234d6bd996fc730976a567a
|
Rust
|
ChristophWurst/diesel_many_to_many
|
/src/models.rs
|
UTF-8
| 799
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
use super::schema::images;
use super::schema::image_tags;
use super::schema::tags;
#[derive(Identifiable, Queryable)]
pub struct Image {
pub id: i32,
pub url: String,
}
#[derive(Insertable)]
#[diesel(table_name = images)]
pub struct NewImage<'a> {
pub url: &'a str,
}
#[derive(Identifiable, Queryable, Associations)]
#[diesel(belongs_to(Image))]
#[diesel(belongs_to(Tag))]
pub struct ImageTag {
pub id: i32,
pub image_id: i32,
pub tag_id: i32,
}
#[derive(Insertable)]
#[diesel(table_name = image_tags)]
pub struct NewImageTag {
pub image_id: i32,
pub tag_id: i32,
}
#[derive(Identifiable, Queryable)]
pub struct Tag {
pub id: i32,
pub label: String,
}
#[derive(Insertable)]
#[diesel(table_name = tags)]
pub struct NewTag<'a> {
pub label: &'a str,
}
| true
|
f3eacca8c77858702a1fa7f08425f0eadd912912
|
Rust
|
elsid/CodeBall
|
/tests/arena.rs
|
UTF-8
| 3,415
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#[test]
fn test_distance_and_normal_to_arena() {
use my_strategy::my_strategy::vec3::Vec3;
use my_strategy::examples::example_arena;
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(0.0, 0.0, 0.0)),
(0.0, Vec3::new(0.0, 1.0, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(0.0, 10.0, 0.0)),
(10.0, Vec3::new(0.0, 1.0, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(0.0, -10.0, 0.0)),
(-10.0, Vec3::new(0.0, 1.0, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(100.0, 100.0, 0.0)),
(-109.18089343777659, Vec3::new(-0.6627595788049191, -0.7488322513769865, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(100.0, 10.0, 0.0)),
(-70.0, Vec3::new(-1.0, 0.0, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(0.0, 10.0, 100.0)),
(-50.08483775994799, Vec3::new(0.0, -0.056513312022655776, -0.9984018457335854))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(29.0, 10.0, 0.0)),
(1.0, Vec3::new(-1.0, 0.0, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(-100.0, 100.0, 0.0)),
(-109.18089343777659, Vec3::new(0.6627595788049191, -0.7488322513769865, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(0.0, 100.0, -100.0)),
(-104.0420478129973, Vec3::new(0.0, -0.8688174591210289, 0.49513253046682293))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(-100.0, 10.0, 0.0)),
(-70.0, Vec3::new(1.0, 0.0, 0.0))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(0.0, 10.0, -100.0)),
(-50.08483775994799, Vec3::new(0.0, -0.056513312022655776, 0.9984018457335854))
);
assert_eq!(
example_arena()
.distance_and_normal(Vec3::new(-24.42538595321975, 2.4677833504956497, 34.911123218207614)),
(1.997137504399881, Vec3::new(0.5800439587575246, 0.5306975301592753, -0.6179879751190503))
);
}
#[test]
fn test_projected_with_shift() {
use my_strategy::my_strategy::vec3::Vec3;
use my_strategy::examples::example_arena;
assert_eq!(
example_arena().projected_with_shift(Vec3::only_y(-1.0), 1.0),
Vec3::new(0.0, 1.0, 0.0),
);
assert_eq!(
example_arena().projected_with_shift(Vec3::new(30.0, -1.0, 0.0), 1.0),
Vec3::new(28.2, 1.4000000000000004, 0.0),
);
assert_eq!(
example_arena().projected_with_shift(Vec3::new(31.0, 5.0, 0.0), 1.0),
Vec3::new(29.0, 5.0, 0.0),
);
}
#[test]
fn test_get_touch_normal() {
use my_strategy::my_strategy::vec3::Vec3;
use my_strategy::examples::{GameType, example_arena, example_me, example_rules};
let rules = example_rules(GameType::TwoRobots);
assert_eq!(
example_arena().get_approximate_touch_normal(&example_me(GameType::TwoRobots, &rules)),
Some(Vec3::new(0.0, 1.0, 0.0)),
);
{
let mut robot = example_me(GameType::TwoRobots, &rules);
robot.y = 2.0;
assert_eq!(example_arena().get_approximate_touch_normal(&robot), None);
}
}
| true
|
6652cdc31f4607a62fdd62a576cc800938f228f2
|
Rust
|
osddeitf/janus-proxy-rust
|
/src/janus/core/ice.rs
|
UTF-8
| 1,003
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
use serde::{Serialize, Deserialize};
use serde_with::skip_serializing_none;
use super::apierror::*;
#[skip_serializing_none]
#[derive(Serialize, Deserialize)]
pub struct JanusIceTrickle {
#[allow(non_snake_case)]
sdpMid: Option<String>,
#[allow(non_snake_case)]
sdpMLineIndex: Option<u64>,
candidate: Option<String>,
// Trickle done
completed: Option<bool>
}
impl JanusIceTrickle {
pub fn validate(&self) -> Result<(), JanusError> {
if self.completed.is_some() {
return Ok(())
}
if self.sdpMid.is_none() && self.sdpMLineIndex.is_none() {
return Err(JanusError::new(JANUS_ERROR_MISSING_MANDATORY_ELEMENT, "Trickle error: missing mandatory element (sdpMid or sdpMLineIndex)".to_string()))
}
if self.candidate.is_none() {
return Err(JanusError::new(JANUS_ERROR_MISSING_MANDATORY_ELEMENT, "Trickle error: missing mandatory element (candidate)".to_string()))
}
Ok(())
}
}
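// Illustration: two small checks of the validation rules above. The struct is constructed
// directly (possible from a child module in the same file) so that no serde_json
// dev-dependency is assumed; the candidate string is an arbitrary example.
#[cfg(test)]
mod validate_sketch {
    use super::JanusIceTrickle;
    #[test]
    fn completed_trickle_needs_no_candidate() {
        let done = JanusIceTrickle {
            sdpMid: None,
            sdpMLineIndex: None,
            candidate: None,
            completed: Some(true),
        };
        assert!(done.validate().is_ok());
    }
    #[test]
    fn candidate_without_sdp_mid_or_mline_index_is_rejected() {
        let incomplete = JanusIceTrickle {
            sdpMid: None,
            sdpMLineIndex: None,
            candidate: Some("candidate:example".to_string()),
            completed: None,
        };
        assert!(incomplete.validate().is_err());
    }
}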
| true
|
91fc3b69026174fd8ad7c3c8eaeb569ab2571bcb
|
Rust
|
mbandazian/cs4414
|
/ps1/src/ex1.rs
|
UTF-8
| 394
| 3.34375
| 3
|
[] |
no_license
|
// http://aml3.github.io/RustTutorial/html/01.html
fn match_exercise(val: (i32, bool)) {
match val {
(y, x) if x && (y >= 20) && (y <= 26) => println!("true and in range"),
(_, x) if x => println!("true and out of range"),
(y, _) if (y >= 40) && (y <= 49) => println!("unknown and in big range"),
_ => println!("none"),
}
}
fn main() {
let x = (51, true);
match_exercise(x);
}
| true
|
c35f015aa32735bdd0d3eb9a663608d681f8222c
|
Rust
|
maksimsco/constriction
|
/src/lib.rs
|
UTF-8
| 32,005
| 3.171875
| 3
|
[
"BSL-1.0",
"MIT",
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
//! Entropy Coding Primitives for Research and Production
//!
//! The `constriction` crate provides a set of composable entropy coding algorithms with a
//! focus on correctness, versatility, ease of use, compression performance, and
//! computational efficiency. The goals of `constriction` are three-fold:
//!
//! 1. **to facilitate research on novel lossless and lossy compression methods** by
//! providing a *composable* set of entropy coding primitives rather than a rigid
//! implementation of a single preconfigured method;
//! 2. **to simplify the transition from research code to production software** by exposing
//! the exact same functionality via both a Python API (for rapid prototyping on research
//! code) and a Rust API (for turning successful prototypes into production); and
//! 3. **to serve as a teaching resource** by providing a wide range of entropy coding
//! algorithms within a single consistent framework, thus making the various algorithms
//! easily discoverable and comparable on example models and data. [Additional teaching
//! material](https://robamler.github.io/teaching/compress21/) is being made publicly
//! available as a by-product of an ongoing university course on data compression with
//! deep probabilistic models.
//!
//! For an example of a compression codec that started as research code in Python and was
//! then deployed as a fast and dependency-free WebAssembly module using `constriction`'s
//! Rust API, have a look at [The Linguistic Flux
//! Capacitor](https://robamler.github.io/linguistic-flux-capacitor).
//!
//! # Project Status
//!
//! We currently provide implementations of the following entropy coding algorithms:
//!
//! - **Asymmetric Numeral Systems (ANS):** a fast modern entropy coder with near-optimal
//! compression effectiveness that supports advanced use cases like bits-back coding.
//! - **Range Coding:** a computationally efficient variant of Arithmetic Coding, that has
//! essentially the same compression effectiveness as ANS Coding but operates as a queue
//! ("first in first out"), which makes it preferable for autoregressive models.
//! - **Chain Coding:** an experimental new entropy coder that combines the (net)
//! effectiveness of stream codes with the locality of symbol codes; it is meant for
//! experimental new compression approaches that perform joint inference, quantization,
//! and bits-back coding in an end-to-end optimization. This experimental coder is mainly
//! provided to prove to ourselves that the API for encoding and decoding, which is shared
//! across all stream coders, is flexible enough to express complex novel tasks.
//! - **Huffman Coding:** a well-known symbol code, mainly provided here for teaching
//! purposes; you'll usually want to use a stream code like ANS or Range Coding instead
//! since symbol codes can have a considerable overhead on the bitrate, especially in the
//! regime of low entropy per symbol, which is common in machine-learning based
//! compression methods.
//!
//! Further, `constriction` provides implementations of common probability distributions in
//! fixed-point arithmetic, which can be used as entropy models in either of the above
//! stream codes. The crate also provides adapters for turning custom probability
//! distributions into exactly invertible fixed-point arithmetic.
//!
//! The provided implementations of entropy coding algorithms and probability distributions
//! are extensively tested and should be considered reliable (except for the still
//! experimental Chain Coder). However, their APIs may change in future versions of
//! `constriction` if more user experience reveals any shortcomings of the current APIs in
//! terms of ergonomics. Please [file an
//! issue](https://github.com/bamler-lab/constriction/issues) if you run into a scenario
//! where the current APIs are suboptimal.
//!
//! # Quick Start With the Rust API
//!
//! You are currently reading the documentation of `constriction`'s Rust API. If Rust is not
//! your language of choice then head over to the [Python API
//! Documentation](https://bamler-lab.github.io/constriction/apidoc/python/). The Rust API
//! provides efficient and composable entropy coding primitives that can be adjusted to a
//! fine degree of detail using type parameters and const generics (type aliases with sane
//! defaults for all generic parameters are provided as a guidance). The Python API exposes
//! the most common use cases of these entropy coding primitives to an environment that
//! feels more natural to many data scientists.
//!
//! ## Setup
//!
//! To use `constriction` in your Rust project, just add the following line to the
//! `[dependencies]` section of your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! constriction = "0.2"
//! ```
//!
//! ## System Requirements
//!
//! `constriction` requires Rust version 1.51 or later for its use of the
//! `min_const_generics` feature. If you have an older version of Rust, update to the latest
//! version by running `rustup update stable`.
//!
//! ## Encoding Example
//!
//! In this example, we'll encode some symbols using a quantized Gaussian distribution as
//! entropy model. Each symbol will be modeled by a quantized Gaussian with a different
//! mean and standard deviation (so that the example is not too simplistic). We'll use the
//! `probability` crate for the Gaussian distributions, so also add the following dependency
//! to your `Cargo.toml`:
//!
//! ```toml
//! probability = "0.17"
//! ```
//!
//! Now, let's encode (i.e., compress) some symbols. We'll use an Asymmetric Numeral Systems
//! (ANS) Coder here for its speed and compression performance. We'll discuss how you could
//! replace the ANS Coder with a Range Coder or a symbol code like Huffman Coding
//! [below](#exercise).
//!
//! ```
//! use constriction::stream::{stack::DefaultAnsCoder, model::DefaultLeakyQuantizer};
//! use probability::distribution::Gaussian;
//!
//! fn encode_sample_data() -> Vec<u32> {
//! // Create an empty ANS Coder with default word and state size:
//! let mut coder = DefaultAnsCoder::new();
//!
//! // Some made up data and entropy models for demonstration purpose:
//! let symbols = [23i32, -15, 78, 43, -69];
//! let means = [35.2, -1.7, 30.1, 71.2, -75.1];
//! let stds = [10.1, 25.3, 23.8, 35.4, 3.9];
//!
//! // Create an adapter that integrates 1-d probability density functions over bins
//! // `[n - 0.5, n + 0.5)` for all integers `n` from `-100` to `100` using fixed point
//! // arithmetic with default precision, guaranteeing a nonzero probability for each bin:
//! let quantizer = DefaultLeakyQuantizer::new(-100..=100);
//!
//! // Encode the data (in reverse order, since ANS Coding operates as a stack):
//! coder.encode_symbols_reverse(
//! symbols.iter().zip(&means).zip(&stds).map(
//! |((&sym, &mean), &std)| (sym, quantizer.quantize(Gaussian::new(mean, std)))
//! )).unwrap();
//!
//! // Retrieve the compressed representation (filling it up to full words with zero bits).
//! coder.into_compressed().unwrap()
//! }
//!
//! assert_eq!(encode_sample_data(), [0x421C_7EC3, 0x000B_8ED1]);
//! ```
//!
//! ## Decoding Example
//!
//! Now, let's reconstruct the sample data from its compressed representation.
//!
//! ```
//! use constriction::stream::{stack::DefaultAnsCoder, model::DefaultLeakyQuantizer, Decode};
//! use probability::distribution::Gaussian;
//!
//! fn decode_sample_data(compressed: Vec<u32>) -> Vec<i32> {
//! // Create an ANS Coder with default word and state size from the compressed data:
//! // (ANS uses the same type for encoding and decoding, which makes the method very flexible
//! // and allows interleaving small encoding and decoding chunks, e.g., for bits-back coding.)
//! let mut coder = DefaultAnsCoder::from_compressed(compressed).unwrap();
//!
//! // Same entropy models and quantizer we used for encoding:
//! let means = [35.2, -1.7, 30.1, 71.2, -75.1];
//! let stds = [10.1, 25.3, 23.8, 35.4, 3.9];
//! let quantizer = DefaultLeakyQuantizer::new(-100..=100);
//!
//! // Decode the data:
//! coder.decode_symbols(
//! means.iter().zip(&stds).map(
//! |(&mean, &std)| quantizer.quantize(Gaussian::new(mean, std))
//! )).collect::<Result<Vec<_>, _>>().unwrap()
//! }
//!
//! assert_eq!(decode_sample_data(vec![0x421C_7EC3, 0x000B_8ED1]), [23, -15, 78, 43, -69]);
//! ```
//!
//! ## Exercise
//!
//! Try out the above examples and verify that decoding reconstructs the original data. Then
//! see how easy `constriction` makes it to replace the ANS Coder with a Range Coder by
//! making the following substitutions:
//!
//! **In the encoder,**
//!
//! - replace `constriction::stream::stack::DefaultAnsCoder` with
//! `constriction::stream::queue::DefaultRangeEncoder`; and
//! - replace `coder.encode_symbols_reverse` with `coder.encode_symbols` (you no longer need
//! to encode symbols in reverse order since Range Coding operates as a queue, i.e.,
//! first-in-first-out). You'll also have to add the line
//! `use constriction::stream::Encode;` to the top of the file to bring the trait method
//! `encode_symbols` into scope.
//!
//! **In the decoder,**
//!
//! - replace `constriction::stream::stack::DefaultAnsCoder` with
//! `constriction::stream::queue::DefaultRangeDecoder` (note that Range Coding
//! distinguishes between an encoder and a decoder type since the encoder writes to the
//! back while the decoder reads from the front; by contrast, ANS Coding is a stack, i.e.,
//! it reads and writes at the same position and allows interleaving reads and writes).
//!
//! *Remark:* You could also use a symbol code like Huffman Coding (see module [`symbol`])
//! but that would have considerably worse compression performance, especially on large
//! files, since symbol codes always emit an integer number of bits per compressed symbol,
//! even if the information content of the symbol is a fractional number (stream codes like
//! ANS and Range Coding *effectively* emit a fractional number of bits per symbol since
//! they amortize over several symbols).
//!
//! The above replacements should lead you to something like this:
//!
//! ```
//! use constriction::stream::{
//! model::DefaultLeakyQuantizer,
//! queue::{DefaultRangeEncoder, DefaultRangeDecoder},
//! Encode, Decode,
//! };
//! use probability::distribution::Gaussian;
//!
//! fn encode_sample_data() -> Vec<u32> {
//! // Create an empty Range Encoder with default word and state size:
//! let mut encoder = DefaultRangeEncoder::new();
//!
//! // Same made up data, entropy models, and quantizer as in the ANS Coding example above:
//! let symbols = [23i32, -15, 78, 43, -69];
//! let means = [35.2, -1.7, 30.1, 71.2, -75.1];
//! let stds = [10.1, 25.3, 23.8, 35.4, 3.9];
//! let quantizer = DefaultLeakyQuantizer::new(-100..=100);
//!
//! // Encode the data (this time in normal order, since Range Coding is a queue):
//! encoder.encode_symbols(
//! symbols.iter().zip(&means).zip(&stds).map(
//! |((&sym, &mean), &std)| (sym, quantizer.quantize(Gaussian::new(mean, std)))
//! )).unwrap();
//!
//! // Retrieve the (sealed up) compressed representation.
//! encoder.into_compressed().unwrap()
//! }
//!
//! fn decode_sample_data(compressed: Vec<u32>) -> Vec<i32> {
//! // Create a Range Decoder with default word and state size from the compressed data:
//! let mut decoder = DefaultRangeDecoder::from_compressed(compressed).unwrap();
//!
//! // Same entropy models and quantizer we used for encoding:
//! let means = [35.2, -1.7, 30.1, 71.2, -75.1];
//! let stds = [10.1, 25.3, 23.8, 35.4, 3.9];
//! let quantizer = DefaultLeakyQuantizer::new(-100..=100);
//!
//! // Decode the data:
//! decoder.decode_symbols(
//! means.iter().zip(&stds).map(
//! |(&mean, &std)| quantizer.quantize(Gaussian::new(mean, std))
//! )).collect::<Result<Vec<_>, _>>().unwrap()
//! }
//!
//! let compressed = encode_sample_data();
//!
//! // We'll get a different compressed representation than in the ANS Coding
//! // example because we're using a different entropy coding algorithm ...
//! assert_eq!(compressed, [0x1C31EFEB, 0x87B430DA]);
//!
//! // ... but as long as we decode with the matching algorithm we can still reconstruct the data:
//! assert_eq!(decode_sample_data(compressed), [23, -15, 78, 43, -69]);
//! ```
//!
//! # Where to Go Next?
//!
//! If you already have an entropy model and you just want to encode and decode some
//! sequence of symbols then you can probably start by adjusting the above
//! [examples](#encoding-example) to your needs. Or have a closer look at the [`stream`]
//! module.
//!
//! If you're still new to the concept of entropy coding then check out the [teaching
//! material](https://robamler.github.io/teaching/compress21/).
#![no_std]
#![warn(rust_2018_idioms, missing_debug_implementations)]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
#[cfg(feature = "pybindings")]
mod pybindings;
pub mod backends;
pub mod stream;
pub mod symbol;
use core::{
convert::Infallible,
fmt::{Binary, Debug, Display, LowerHex, UpperHex},
hash::Hash,
num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize},
};
use num::{
cast::AsPrimitive,
traits::{WrappingAdd, WrappingSub},
PrimInt, Unsigned,
};
// READ WRITE SEMANTICS =======================================================
/// A trait for marking how reading and writing order relate to each other.
///
/// This is currently only used in the [`backends`] module. Future versions of
/// `constriction` may expand its use to frontends.
pub trait Semantics: Default {}
/// Zero sized marker trait for last-in-first-out read/write [`Semantics`]
///
/// This type typically only comes up in advanced use cases that are generic over read/write
/// semantics. If you are looking for an entropy coder that operates as a stack, check out
/// the module [`stream::stack`].
#[derive(Debug, Default)]
pub struct Stack {}
impl Semantics for Stack {}
/// Zero sized marker trait for first-in-first-out read/write [`Semantics`]
///
/// This type typically only comes up in advanced use cases that are generic over read/write
/// semantics. If you are looking for an entropy coder that operates as a queue, check out
/// the module [`stream::queue`].
#[derive(Debug, Default)]
pub struct Queue {}
impl Semantics for Queue {}
// GENERIC ERROR TYPES ========================================================
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CoderError<FrontendError, BackendError> {
Frontend(FrontendError),
Backend(BackendError),
}
impl<FrontendError, BackendError> CoderError<FrontendError, BackendError> {
pub fn map_frontend<E>(self, f: impl Fn(FrontendError) -> E) -> CoderError<E, BackendError> {
match self {
Self::Frontend(err) => CoderError::Frontend(f(err)),
Self::Backend(err) => CoderError::Backend(err),
}
}
pub fn map_backend<E>(self, f: impl Fn(BackendError) -> E) -> CoderError<FrontendError, E> {
match self {
Self::Backend(err) => CoderError::Backend(f(err)),
Self::Frontend(err) => CoderError::Frontend(err),
}
}
}
impl<BackendError: Display, FrontendError: Display> Display
for CoderError<FrontendError, BackendError>
{
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Frontend(err) => write!(f, "Invalid compressed data: {}", err),
Self::Backend(err) => write!(f, "Error while reading compressed data: {}", err),
}
}
}
#[cfg(feature = "std")]
impl<FrontendError: std::error::Error + 'static, BackendError: std::error::Error + 'static>
std::error::Error for CoderError<FrontendError, BackendError>
{
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Self::Frontend(source) => Some(source),
Self::Backend(source) => Some(source),
}
}
}
impl<FrontendError, BackendError> From<BackendError> for CoderError<FrontendError, BackendError> {
fn from(read_error: BackendError) -> Self {
Self::Backend(read_error)
}
}
impl<FrontendError> CoderError<FrontendError, Infallible> {
fn into_frontend_error(self) -> FrontendError {
match self {
CoderError::Frontend(frontend_error) => frontend_error,
CoderError::Backend(infallible) => match infallible {},
}
}
}
type DefaultEncoderError<BackendError> = CoderError<DefaultEncoderFrontendError, BackendError>;
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DefaultEncoderFrontendError {
/// Tried to encode a symbol with zero probability under the used entropy model.
///
/// This error can usually be avoided by using a "leaky" distribution, as the
/// entropy model, i.e., a distribution that assigns a nonzero probability to all
/// symbols within a finite domain. Leaky distributions can be constructed with,
/// e.g., a [`LeakyQuantizer`](models/struct.LeakyQuantizer.html) or with
/// [`LeakyCategorical::from_floating_point_probabilities`](
/// models/struct.LeakyCategorical.html#method.from_floating_point_probabilities).
ImpossibleSymbol,
}
impl Display for DefaultEncoderFrontendError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::ImpossibleSymbol => write!(
f,
"Tried to encode symbol that has zero probability under the used entropy model."
),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for DefaultEncoderFrontendError {}
impl DefaultEncoderFrontendError {
#[inline(always)]
const fn into_coder_error<BackendError>(self) -> DefaultEncoderError<BackendError> {
DefaultEncoderError::Frontend(self)
}
}
/// Trait for coders or backends that *might* implement [`Pos`] and/or [`Seek`]
///
/// If a type implements `PosSeek` then that doesn't necessarily mean that it also
/// implements [`Pos`] or [`Seek`]. Implementing `PosSeek` only fixes the common `Position`
/// type that is used *if* the type implements `Pos` and/or `Seek`.
pub trait PosSeek {
type Position: Clone;
}
/// A trait for entropy coders that keep track of their current position within the
/// compressed data.
///
/// This is the counterpart of [`Seek`]. Call [`Pos::pos`] to record
/// "snapshots" of an entropy coder, and then call [`Seek::seek`] at a later time
/// to jump back to these snapshots. See examples in the documentations of [`Seek`]
/// and [`Seek::seek`].
pub trait Pos: PosSeek {
/// Returns the position in the compressed data, in units of `Word`s.
///
/// It is up to the entropy coder to define what constitutes the beginning and end
    /// positions within the compressed data (for example, an [`AnsCoder`] begins encoding
/// at position zero but it begins decoding at position `ans.buf().len()`).
///
/// [`AnsCoder`]: stack::AnsCoder
fn pos(&self) -> Self::Position;
}
/// A trait for entropy coders that support random access.
///
/// This is the counterpart of [`Pos`]. While [`Pos::pos`] can be used to
/// record "snapshots" of an entropy coder, [`Seek::seek`] can be used to jump to these
/// recorded snapshots.
///
/// Not all entropy coders that implement `Pos` also implement `Seek`. For example,
/// [`DefaultAnsCoder`] implements `Pos` but it doesn't implement `Seek` because it
/// supports both encoding and decoding and therefore always operates at the head. In
/// such a case one can usually obtain a seekable entropy coder in return for
/// surrendering some other property. For example, `DefaultAnsCoder` provides the methods
/// [`seekable_decoder`] and [`into_seekable_decoder`] that return a decoder which
/// implements `Seek` but which can no longer be used for encoding (i.e., it doesn't
/// implement [`Encode`]).
///
/// # Example
///
/// ```
/// use constriction::stream::{
/// model::DefaultContiguousCategoricalEntropyModel, stack::DefaultAnsCoder, Decode
/// };
/// use constriction::{Pos, Seek};
///
/// // Create a `AnsCoder` encoder and an entropy model:
/// let mut ans = DefaultAnsCoder::new();
/// let probabilities = vec![0.03, 0.07, 0.1, 0.1, 0.2, 0.2, 0.1, 0.15, 0.05];
/// let entropy_model = DefaultContiguousCategoricalEntropyModel
/// ::from_floating_point_probabilities(&probabilities).unwrap();
///
/// // Encode some symbols in two chunks and take a snapshot after each chunk.
/// let symbols1 = vec![8, 2, 0, 7];
/// ans.encode_iid_symbols_reverse(&symbols1, &entropy_model).unwrap();
/// let snapshot1 = ans.pos();
///
/// let symbols2 = vec![3, 1, 5];
/// ans.encode_iid_symbols_reverse(&symbols2, &entropy_model).unwrap();
/// let snapshot2 = ans.pos();
///
/// // As discussed above, `DefaultAnsCoder` doesn't impl `Seek` but we can get a decoder that does:
/// let mut seekable_decoder = ans.as_seekable_decoder();
///
/// // `seekable_decoder` is still a `AnsCoder`, so decoding would start with the items we encoded
/// // last. But since it implements `Seek` we can jump ahead to our first snapshot:
/// seekable_decoder.seek(snapshot1);
/// let decoded1 = seekable_decoder
/// .decode_iid_symbols(4, &entropy_model)
/// .collect::<Result<Vec<_>, _>>()
/// .unwrap();
/// assert_eq!(decoded1, symbols1);
///
/// // We've reached the end of the compressed data ...
/// assert!(seekable_decoder.is_empty());
///
/// // ... but we can still jump to somewhere else and continue decoding from there:
/// seekable_decoder.seek(snapshot2);
///
/// // Creating snapshots didn't mutate the coder, so we can just decode through `snapshot1`:
/// let decoded_both = seekable_decoder.decode_iid_symbols(7, &entropy_model).map(Result::unwrap);
/// assert!(decoded_both.eq(symbols2.into_iter().chain(symbols1)));
/// assert!(seekable_decoder.is_empty()); // <-- We've reached the end again.
/// ```
///
/// [`DefaultAnsCoder`]: stack::DefaultAnsCoder
/// [`seekable_decoder`]: stack::AnsCoder::seekable_decoder
/// [`into_seekable_decoder`]: stack::AnsCoder::into_seekable_decoder
pub trait Seek: PosSeek {
/// Jumps to a given position in the compressed data.
///
/// The argument `pos` is the same pair of values returned by
/// [`Pos::pos`], i.e., it is a tuple of the position in the compressed
/// data and the `State` to which the entropy coder should be restored. Both values
/// are absolute (i.e., seeking happens independently of the current state or
/// position of the entropy coder). The position is measured in units of
/// `Word`s (see second example below where we manipulate a position
/// obtained from `Pos::pos` in order to reflect a manual reordering of
/// the `Word`s in the compressed data).
///
/// # Examples
///
/// The method takes the position and state as a tuple rather than as independent
/// method arguments so that one can simply pass in the tuple obtained from
/// [`Pos::pos`] as sketched below:
///
/// ```
/// // Step 1: Obtain an encoder and encode some data (omitted for brevity) ...
/// # use constriction::{stream::stack::DefaultAnsCoder, Pos, Seek};
/// # let encoder = DefaultAnsCoder::new();
///
/// // Step 2: Take a snapshot by calling `Pos::pos`:
/// let snapshot = encoder.pos(); // <-- Returns a tuple `(pos, state)`.
///
/// // Step 3: Encode some more data and then obtain a decoder (omitted for brevity) ...
/// # let mut decoder = encoder.as_seekable_decoder();
///
/// // Step 4: Jump to snapshot by calling `Seek::seek`:
/// decoder.seek(snapshot); // <-- No need to deconstruct `snapshot` into `(pos, state)`.
/// ```
///
/// For more fine-grained control, one may want to assemble the tuple
/// `pos` manually. For example, a [`DefaultAnsCoder`] encodes data from
/// front to back and then decodes the data in the reverse direction from back to
/// front. Decoding from back to front may be inconvenient in some use cases, so one
/// might prefer to instead reverse the order of the `Word`s once encoding
/// is finished, and then decode them in the more natural direction from front to
/// back. Reversing the compressed data changes the position of each
/// `Word`, and so any positions obtained from `Pos` need to be adjusted
/// accordingly before they may be passed to `seek`, as in the following example:
///
/// ```
/// use constriction::{
/// stream::{model::LeakyQuantizer, stack::{DefaultAnsCoder, AnsCoder}, Decode},
/// Pos, Seek
/// };
///
/// // Construct a `DefaultAnsCoder` for encoding and an entropy model:
/// let mut encoder = DefaultAnsCoder::new();
/// let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(-100..=100);
/// let entropy_model = quantizer.quantize(probability::distribution::Gaussian::new(0.0, 10.0));
///
/// // Encode two chunks of symbols and take a snapshot in-between:
/// encoder.encode_iid_symbols_reverse(-100..40, &entropy_model).unwrap();
/// let (mut snapshot_pos, snapshot_state) = encoder.pos();
/// encoder.encode_iid_symbols_reverse(50..101, &entropy_model).unwrap();
///
/// // Obtain compressed data, reverse it, and create a decoder that reads it from front to back:
/// let mut compressed = encoder.into_compressed().unwrap();
/// compressed.reverse();
/// snapshot_pos = compressed.len() - snapshot_pos; // <-- Adjusts the snapshot position.
/// let mut decoder = AnsCoder::from_reversed_compressed(compressed).unwrap();
///
/// // Since we chose to encode onto a stack, decoding yields the last encoded chunk first:
/// assert_eq!(decoder.decode_symbol(&entropy_model).unwrap(), 50);
/// assert_eq!(decoder.decode_symbol(&entropy_model).unwrap(), 51);
///
/// // To jump to our snapshot, we have to use the adjusted `snapshot_pos`:
/// decoder.seek((snapshot_pos, snapshot_state));
/// assert!(decoder.decode_iid_symbols(140, &entropy_model).map(Result::unwrap).eq(-100..40));
/// assert!(decoder.is_empty()); // <-- We've reached the end of the compressed data.
/// ```
///
/// [`DefaultAnsCoder`]: stack::DefaultAnsCoder
#[allow(clippy::result_unit_err)]
fn seek(&mut self, pos: Self::Position) -> Result<(), ()>;
}
/// A trait for bit strings of fixed (and usually small) length.
///
/// Short fixed-length bit strings are fundamental building blocks of efficient entropy
/// coding algorithms. They are currently used for the following purposes:
/// - to represent the smallest unit of compressed data (see [`Code::Word`]);
/// - to represent probabilities in fixed point arithmetic (see
/// [`EntropyModel::Probability`]); and
/// - the internal state of entropy coders (see [`Code::State`]) is typically comprised of
/// one or more `BitArray`s, although this is not a requirement.
///
/// This trait is implemented on all primitive unsigned integer types. It is not recommended
/// to implement this trait for custom types since coders will assume, for performance
/// considerations, that `BitArray`s can be represented and manipulated efficiently in
/// hardware.
///
/// # Safety
///
/// This trait is marked `unsafe` so that entropy coders may rely on the assumption that all
/// `BitArray`s have precisely the same behavior as builtin unsigned integer types, and that
/// [`BitArray::BITS`] has the correct value.
pub unsafe trait BitArray:
PrimInt
+ Unsigned
+ WrappingAdd
+ WrappingSub
+ LowerHex
+ UpperHex
+ Binary
+ Default
+ Copy
+ Display
+ Debug
+ Eq
+ Hash
+ 'static
{
/// The (fixed) length of the `BitArray` in bits.
///
/// Defaults to `8 * core::mem::size_of::<Self>()`, which is suitable for all
/// primitive unsigned integers.
///
/// This could arguably be called `LEN` instead, but that may be confusing since
/// "lengths" are typically not measured in bits in the Rust ecosystem.
const BITS: usize = 8 * core::mem::size_of::<Self>();
type NonZero: NonZeroBitArray<Base = Self>;
#[inline(always)]
fn into_nonzero(self) -> Option<Self::NonZero> {
Self::NonZero::new(self)
}
/// # Safety
///
/// The provided value must be nonzero.
#[inline(always)]
unsafe fn into_nonzero_unchecked(self) -> Self::NonZero {
Self::NonZero::new_unchecked(self)
}
}
#[inline(always)]
fn wrapping_pow2<T: BitArray>(exponent: usize) -> T {
if exponent >= T::BITS {
T::zero()
} else {
T::one() << exponent
}
}
pub unsafe trait NonZeroBitArray: Copy + Display + Debug + Eq + Hash + 'static {
type Base: BitArray<NonZero = Self>;
fn new(n: Self::Base) -> Option<Self>;
/// # Safety
///
/// The provided value `n` must be nonzero.
unsafe fn new_unchecked(n: Self::Base) -> Self;
fn get(self) -> Self::Base;
}
/// Iterates from most significant to least significant bits in chunks but skips any
/// initial zero chunks.
fn bit_array_to_chunks_truncated<Data, Chunk>(
data: Data,
) -> impl Iterator<Item = Chunk> + ExactSizeIterator + DoubleEndedIterator
where
Data: BitArray + AsPrimitive<Chunk>,
Chunk: BitArray,
{
(0..(Data::BITS - data.leading_zeros() as usize))
.step_by(Chunk::BITS)
.rev()
.map(move |shift| (data >> shift).as_())
}
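// Illustration: the chunking order of `bit_array_to_chunks_truncated` on a concrete value.
#[cfg(test)]
mod chunk_iteration_sketch {
    use super::*;
    #[test]
    fn yields_most_significant_chunks_first_and_skips_leading_zero_chunks() {
        // 0x0001_02AB occupies three bytes; the all-zero most significant byte is skipped.
        let chunks: alloc::vec::Vec<u8> =
            bit_array_to_chunks_truncated::<u32, u8>(0x0001_02AB).collect();
        assert_eq!(chunks, alloc::vec![0x01, 0x02, 0xAB]);
    }
}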
macro_rules! unsafe_impl_bit_array {
($(($base:ty, $non_zero:ty)),+ $(,)?) => {
$(
unsafe impl BitArray for $base {
type NonZero = $non_zero;
}
unsafe impl NonZeroBitArray for $non_zero {
type Base = $base;
#[inline(always)]
fn new(n: Self::Base) -> Option<Self> {
Self::new(n)
}
#[inline(always)]
unsafe fn new_unchecked(n: Self::Base) -> Self {
Self::new_unchecked(n)
}
#[inline(always)]
fn get(self) -> Self::Base {
let non_zero = self.get();
unsafe {
// SAFETY: This is trivially safe because `non_zero` came from a
// `NonZero` type. We really shouldn't have to give the compiler
// this hint but removing it leads to a massive (~30%) performance
// regression on our benchmarks (TODO: file rust bug).
if non_zero == num::zero::<Self::Base>() {
core::hint::unreachable_unchecked();
} else {
non_zero
}
}
}
}
)+
};
}
unsafe_impl_bit_array!(
(u8, NonZeroU8),
(u16, NonZeroU16),
(u32, NonZeroU32),
(u64, NonZeroU64),
(u128, NonZeroU128),
(usize, NonZeroUsize),
);
pub trait UnwrapInfallible<T> {
fn unwrap_infallible(self) -> T;
}
impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
fn unwrap_infallible(self) -> T {
match self {
Ok(x) => x,
Err(infallible) => match infallible {},
}
}
}
impl<T> UnwrapInfallible<T> for Result<T, CoderError<Infallible, Infallible>> {
fn unwrap_infallible(self) -> T {
match self {
Ok(x) => x,
Err(infallible) => match infallible {
CoderError::Backend(infallible) => match infallible {},
CoderError::Frontend(infallible) => match infallible {},
},
}
}
}
| true
|
94d37f33e17bfc5c916da17fd30851226185f3b2
|
Rust
|
micolore/hello-rust-2017
|
/struct_rust_2/src/main.rs
|
UTF-8
| 2,130
| 3.890625
| 4
|
[] |
no_license
|
fn main() {
    println!("Hello, world!");
    let user1 = build_user(
        String::from("someone@example.com"),
        String::from("someusername123"),
    );
    // Creating an instance from another instance, field by field
    let user2 = User {
        email: String::from("another@example.com"),
        username: String::from("anotherusername567"),
        active: user1.active,
        sign_in_count: user1.sign_in_count,
    };
    // Struct update syntax: `..` fills every field that is not set explicitly
    // with the value of the corresponding field of the given instance
    let user3 = User {
        email: String::from("another@example.com"),
        username: String::from("anotherusername567"),
        ..user1
    };
    // Tuple structs without named fields create distinct types
    let black = Color(0, 0, 0);
    let origin = Point(0, 0, 0);
    let _ = (user2, user3, black, origin);
}
struct User {
    username: String,
    email: String,
    sign_in_count: u64,
    active: bool,
}
fn build_user(email: String, username: String) -> User {
    User {
        email: email,
        username: username,
        active: true,
        sign_in_count: 1,
    }
}
// Field init shorthand: when a variable and a field have the same name,
// `email` is equivalent to `email: email`
#[allow(dead_code)]
fn build_user_shorthand(email: String, username: String) -> User {
    User {
        email,
        username,
        active: true,
        sign_in_count: 1,
    }
}
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
// Structs without any fields are called unit-like structs because they behave
// like `()`, the unit type.
//
// Storing references such as `&str` in a struct requires lifetime annotations;
// the version from the original excerpt, without them, does not compile:
//
// struct User {
//     username: &str,
//     email: &str,
//     sign_in_count: u64,
//     active: bool,
// }
// fn main() {
//     let user1 = User {
//         email: "someone@example.com",
//         username: "someusername123",
//         active: true,
//         sign_in_count: 1,
//     };
// }
| true
|
14097892dbda7332e4736b8e6be70ce8c238b4cc
|
Rust
|
linde12/advent-of-code-2020
|
/6/src/part2.rs
|
UTF-8
| 1,193
| 3.421875
| 3
|
[] |
no_license
|
use std::collections::HashMap;
pub fn solve(input: &str) -> usize {
let groups: Vec<&str> = input.split("\n\n").collect();
let group_answers: Vec<usize> = groups
.iter()
.map(|group| {
let mut answers: HashMap<char, usize> = HashMap::new();
let n_people_in_group = group.split_whitespace().count();
            // Count, for each question, how many people in the group answered "yes" to it
group
.chars()
.skip_while(|c| c.is_whitespace())
.for_each(|ch| {
answers.entry(ch).and_modify(|e| *e += 1).or_insert(1);
});
            // Count the number of questions everyone in the group answered "yes" to
let c = answers
.iter()
.filter(|(_ch, n_ans)| **n_ans == n_people_in_group)
.count();
c
})
.collect();
// For each group, get the sum of all the questions everyone answered "yes" to
group_answers.iter().sum()
}
#[test]
fn test_2() {
let input = include_str!("./input_test.txt").trim();
let result = solve(&input);
assert_eq!(result, 6)
}
| true
|
2eb79eb96ecf659b354bf4e8d2416f755e2c4ad1
|
Rust
|
iorveth/CHIP8Emulator
|
/src/ram.rs
|
UTF-8
| 1,407
| 3.328125
| 3
|
[] |
no_license
|
pub struct Ram {
pub bytes: [u8; 4096]
}
impl Ram {
fn write_sprites(bytes: &mut [u8;4096]) -> &mut [u8;4096] {
let sprites: [[u8;5]; 16] = [
[0xF0, 0x90, 0x90, 0x90, 0xF0],
[0x20, 0x60, 0x20, 0x20, 0x70],
[0xF0, 0x10, 0xF0, 0x80, 0xF0],
[0xF0, 0x10, 0xF0, 0x10, 0xF0],
[0x90, 0x90, 0xF0, 0x10, 0x10],
[0xF0, 0x80, 0xF0, 0x10, 0xF0],
[0xF0, 0x80, 0xF0, 0x90, 0xF0],
[0xF0, 0x10, 0x20, 0x40, 0x40],
[0xF0, 0x90, 0xF0, 0x90, 0xF0],
[0xF0, 0x90, 0xF0, 0x10, 0xF0],
[0xF0, 0x90, 0xF0, 0x90, 0x90],
[0xE0, 0x90, 0xE0, 0x90, 0xE0],
[0xF0, 0x80, 0x80, 0x80, 0xF0],
[0xE0, 0x90, 0x90, 0x90, 0xE0],
[0xF0, 0x80, 0xF0, 0x80, 0xF0],
[0xF0, 0x80, 0xF0, 0x80, 0x80],
];
let mut i = 0;
for sprite in sprites.iter(){
for value in sprite {
bytes[i] = *value;
i+=1;
}
}
bytes
}
pub fn new() -> Ram {
let mut bytes: [u8;4096] = [0;4096];
        Ram::write_sprites(&mut bytes);
Ram {bytes}
}
pub fn write_byte(&mut self, position: usize, value: u8) {
self.bytes[position] = value;
}
pub fn read_byte(&self, position: usize) -> u8 {
self.bytes[position]
}
}
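// Illustration: a small read/write check that uses only the API above.
#[cfg(test)]
mod ram_sketch {
    use super::Ram;
    #[test]
    fn sprites_are_preloaded_and_bytes_round_trip() {
        let mut ram = Ram::new();
        // The built-in font sprite for "0" starts at address 0x000 with byte 0xF0.
        assert_eq!(ram.read_byte(0), 0xF0);
        // CHIP-8 programs are conventionally loaded at 0x200.
        ram.write_byte(0x200, 0xAB);
        assert_eq!(ram.read_byte(0x200), 0xAB);
    }
}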
| true
|
ac38299c7f293001d5269054ad91e1bdd9173db8
|
Rust
|
fanzeyi/prjfs-rs
|
/src/conv.rs
|
UTF-8
| 886
| 2.734375
| 3
|
[] |
no_license
|
use std::ffi::{OsStr, OsString};
use std::iter::once;
use std::os::windows::ffi::{OsStrExt, OsStringExt};
use winapi::um::{winbase::lstrlenW, winnt::PCWSTR};
pub struct WStr {
data: Vec<u16>,
}
impl WStr {
pub fn as_ptr(&self) -> PCWSTR {
self.data.as_ptr()
}
}
pub trait WStrExt {
fn to_wstr(&self) -> WStr;
}
impl<T> WStrExt for T
where
T: AsRef<OsStr>,
{
fn to_wstr(&self) -> WStr {
let data = self
.as_ref()
.encode_wide()
.chain(once(0))
.collect::<Vec<u16>>();
WStr { data }
}
}
pub trait RawWStrExt {
fn to_os(&self) -> OsString;
}
impl RawWStrExt for PCWSTR {
fn to_os(&self) -> OsString {
let length = unsafe { lstrlenW(*self) as usize };
let wstr = unsafe { std::slice::from_raw_parts(*self, length) };
OsString::from_wide(wstr)
}
}
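// Illustration: a round trip through the two traits above. Like the rest of this module
// it is Windows-only, since `to_os` calls `lstrlenW` at runtime.
#[cfg(test)]
mod wstr_roundtrip_sketch {
    use super::*;
    use std::ffi::OsString;
    #[test]
    fn os_string_survives_a_round_trip() {
        let wide = OsString::from("hello").to_wstr(); // OsString -> NUL-terminated UTF-16
        let back = wide.as_ptr().to_os(); // PCWSTR -> OsString via lstrlenW
        assert_eq!(back, OsString::from("hello"));
    }
}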
| true
|
505df20c87a69d2077ddd59c3fed2fa59d647907
|
Rust
|
hosseind88/SimpleSquoosh
|
/rust-package/src/lib.rs
|
UTF-8
| 2,381
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
mod utils;
use std::mem;
use std::os::raw::c_void;
use std::slice;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// In order to work with the memory we expose (de)allocation methods
#[no_mangle]
pub extern "C" fn alloc(size: usize) -> *mut c_void {
let mut buf = Vec::with_capacity(size);
let ptr = buf.as_mut_ptr();
mem::forget(buf);
return ptr as *mut c_void;
}
#[no_mangle]
pub extern "C" fn dealloc(ptr: *mut c_void, cap: usize) {
unsafe {
let _buf = Vec::from_raw_parts(ptr, 0, cap);
}
}
#[wasm_bindgen]
pub fn rotate_180(pointer: *mut u8, width: usize, height: usize) {
let pixel_length: usize = 4;
let line_length = width * pixel_length;
let half_height_untraited: f32 = (height as f32) / 2.0;
let size = width * height * pixel_length;
let half_height = ((half_height_untraited * 100.0).round() / 100.0) as usize;
let sl = unsafe { slice::from_raw_parts_mut(pointer, size) };
for line in 0..half_height {
let start_of_line = line * line_length;
let start_of_opposite_line = (height - 1 - line) * line_length;
for column in 0..width {
let pixel_start = start_of_line + column * pixel_length;
let pixel_end = pixel_start + pixel_length;
let opposite_pixel_start = start_of_opposite_line + column * pixel_length;
let opposite_pixel_end = opposite_pixel_start + pixel_length;
let opposite_pixel = [
sl[opposite_pixel_start],
sl[opposite_pixel_start + 1],
sl[opposite_pixel_start + 2],
sl[opposite_pixel_start + 3],
];
let target_pixel = [
sl[pixel_start],
sl[pixel_start + 1],
sl[pixel_start + 2],
sl[pixel_start + 3],
];
for item in opposite_pixel_start..opposite_pixel_end {
let is = item - opposite_pixel_start;
sl[item] = target_pixel[is];
}
for item in pixel_start..pixel_end {
let is = item - pixel_start;
sl[item] = opposite_pixel[is];
}
}
}
}
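// Illustration: a minimal check of the row swap. Note that the loop above mirrors rows
// top-to-bottom while leaving each row's pixels in place (a vertical flip); a full 180°
// rotation of an image wider than one pixel would also reverse the pixels within each row.
#[cfg(test)]
mod rotate_sketch {
    use super::rotate_180;
    #[test]
    fn swaps_top_and_bottom_rows() {
        // One column, two rows of RGBA pixels.
        let mut pixels: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8];
        rotate_180(pixels.as_mut_ptr(), 1, 2);
        assert_eq!(pixels, vec![5, 6, 7, 8, 1, 2, 3, 4]);
    }
}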
| true
|
597b685ed878424eee3dd647d2031ba15b3c6d30
|
Rust
|
marc47marc47/leetcode-cn
|
/74/search_matrix/src/lib.rs
|
UTF-8
| 1,514
| 3.03125
| 3
|
[] |
no_license
|
/*
* @Description:
* @Version: 2.0
* @Author: kingeasternsun
* @Date: 2021-02-23 16:27:37
* @LastEditors: kingeasternsun
* @LastEditTime: 2021-02-23 16:35:46
* @FilePath: \74\search_matrix\src\lib.rs
*/
pub struct Solution;
impl Solution {
pub fn search_matrix(matrix: Vec<Vec<i32>>, target: i32) -> bool {
let row = matrix.len();
if row == 0 {
return false;
}
let col = matrix[0].len();
if col == 0 {
return false;
}
let mut beg = 0;
let mut end = row * col - 1;
while beg <= end {
let mid = (end - beg) / 2 + beg;
if matrix[mid / col][mid % col] == target {
return true;
}
if matrix[mid / col][mid % col] > target {
                if mid == 0 { // careful: `mid` is usize, so `mid - 1` below would underflow
                    return false
                }
end = mid - 1
} else {
beg = mid + 1
}
}
return false;
}
}
#[cfg(test)]
mod tests {
use crate::Solution;
#[test]
fn it_works() {
assert_eq!(Solution::search_matrix(vec![vec![1,3,5,7],vec![10,11,16,20],vec![23,30,34,60]], 3),true);
assert_eq!(Solution::search_matrix(vec![vec![1,3,5,7],vec![10,11,16,20],vec![23,30,34,60]], 13),false);
assert_eq!(Solution::search_matrix(vec![vec![1]], 13),false);
assert_eq!(Solution::search_matrix(vec![vec![1]], -1),false);
}
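    // Added sketch: empty inputs are rejected before the binary search runs.
    #[test]
    fn empty_matrix() {
        assert_eq!(Solution::search_matrix(vec![], 1), false);
        assert_eq!(Solution::search_matrix(vec![vec![]], 1), false);
    }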
}
| true
|
00c2b58f4240939fabd85c9022db52a175f6e4ee
|
Rust
|
ApophisLee/gbc
|
/lib/src/registers.rs
|
UTF-8
| 7,445
| 3.5
| 4
|
[] |
no_license
|
/// 8-bit register names
#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(non_snake_case)]
pub enum Reg8 {
A,
F,
B,
C,
D,
E,
H,
L,
}
impl std::fmt::Display for Reg8 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Reg8::A => write!(f, "A"),
Reg8::F => write!(f, "F"),
Reg8::B => write!(f, "B"),
Reg8::C => write!(f, "C"),
Reg8::D => write!(f, "D"),
Reg8::E => write!(f, "E"),
Reg8::H => write!(f, "H"),
Reg8::L => write!(f, "L"),
}
}
}
/// 16-bit register names
/// This includes the "combo" registers
#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(non_snake_case)]
pub enum Reg16 {
AF,
BC,
DE,
HL,
PC,
SP,
}
impl std::fmt::Display for Reg16 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Reg16::AF => write!(f, "AF"),
Reg16::BC => write!(f, "BC"),
Reg16::DE => write!(f, "DE"),
Reg16::HL => write!(f, "HL"),
Reg16::PC => write!(f, "PC"),
Reg16::SP => write!(f, "SP"),
}
}
}
/// A trait that defines basic register operations.
pub trait RegisterOps<R, V> {
fn read(&self, reg: R) -> V;
fn write(&mut self, reg: R, value: V);
}
#[derive(Clone, Copy, PartialEq)]
#[repr(u8)]
pub enum Flag {
Zero = 7,
Subtract = 6,
HalfCarry = 5,
Carry = 4,
}
#[allow(non_snake_case)]
#[derive(Default)]
#[cfg_attr(feature = "save", derive(serde::Serialize), derive(serde::Deserialize))]
pub struct RegisterFile {
// Registers
A: u8,
F: u8, // Flags register
B: u8,
C: u8,
D: u8,
E: u8,
H: u8,
L: u8,
pub PC: u16,
pub SP: u16,
}
impl RegisterFile {
const ZERO_MASK: u8 = 1 << 7;
const SUBTRACT_MASK: u8 = 1 << 6;
const HALF_CARRY_MASK: u8 = 1 << 5;
const CARRY_MASK: u8 = 1 << 4;
pub fn empty() -> Self {
Default::default()
}
/// Returns a new register file
///
/// * Registers are initialized to some values on boot based on GB mode
/// * SP is initialized to 0xFFFE on boot
/// * PC is initialized to 0x100 on boot (once internal ROM completes)
pub fn new(cgb: bool) -> Self {
if cgb {
Self {
A: 0x11,
F: 0x80,
B: 0x00,
C: 0x00,
D: 0xFF,
E: 0x56,
H: 0x00,
L: 0x0D,
PC: 0x0100,
SP: 0xFFFE,
}
} else {
Self {
A: 0x01,
F: 0xB0,
B: 0x00,
C: 0x13,
D: 0x00,
E: 0xD8,
H: 0x01,
L: 0x4D,
PC: 0x0100,
SP: 0xFFFE,
}
}
}
/// Set a flag
pub fn set(&mut self, flag: Flag, value: bool) {
// Build a bit mask for this flag
let mask: u8 = 1 << flag as u8;
// Update the flags register accordingly
if value {
self.F |= mask;
} else {
self.F &= !mask;
}
}
/// Clear a flag
pub fn clear(&mut self, flag: Flag) {
// Build a bit mask for this flag
let mask: u8 = 1 << flag as u8;
// Update the flags register accordingly
self.F &= !mask;
}
pub fn flags(&self) -> u8 {
self.F
}
pub fn zero(&self) -> bool {
(self.F & Self::ZERO_MASK) != 0
}
pub fn subtract(&self) -> bool {
(self.F & Self::SUBTRACT_MASK) != 0
}
pub fn half_carry(&self) -> bool {
(self.F & Self::HALF_CARRY_MASK) != 0
}
pub fn carry(&self) -> bool {
(self.F & Self::CARRY_MASK) != 0
}
}
impl RegisterOps<Reg8, u8> for RegisterFile {
fn read(&self, reg: Reg8) -> u8 {
match reg {
Reg8::A => self.A,
Reg8::F => self.F,
Reg8::B => self.B,
Reg8::C => self.C,
Reg8::D => self.D,
Reg8::E => self.E,
Reg8::H => self.H,
Reg8::L => self.L,
}
}
fn write(&mut self, reg: Reg8, value: u8) {
match reg {
Reg8::A => self.A = value,
Reg8::F => self.F = value & 0xF0, // Lower 4 bits are unused
Reg8::B => self.B = value,
Reg8::C => self.C = value,
Reg8::D => self.D = value,
Reg8::E => self.E = value,
Reg8::H => self.H = value,
Reg8::L => self.L = value,
}
}
}
impl RegisterOps<Reg16, u16> for RegisterFile {
fn read(&self, reg: Reg16) -> u16 {
match reg {
Reg16::AF => (self.A as u16) << 8 | self.F as u16,
Reg16::BC => (self.B as u16) << 8 | self.C as u16,
Reg16::DE => (self.D as u16) << 8 | self.E as u16,
Reg16::HL => (self.H as u16) << 8 | self.L as u16,
Reg16::SP => self.SP,
Reg16::PC => self.PC,
}
}
fn write(&mut self, reg: Reg16, value: u16) {
match reg {
Reg16::AF => {
self.A = (value >> 8) as u8;
self.F = value as u8 & 0xF0; // Lower 4 bits are unused
}
Reg16::BC => {
self.B = (value >> 8) as u8;
self.C = value as u8;
}
Reg16::DE => {
self.D = (value >> 8) as u8;
self.E = value as u8;
}
Reg16::HL => {
self.H = (value >> 8) as u8;
self.L = value as u8;
}
Reg16::PC => self.PC = value,
Reg16::SP => self.SP = value,
}
}
}
impl std::fmt::Display for RegisterFile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let af = self.read(Reg16::AF);
let bc = self.read(Reg16::BC);
let de = self.read(Reg16::DE);
let hl = self.read(Reg16::HL);
write!(f,
"AF: {:#06X}\n\
BC: {:#06X}\n\
DE: {:#06X}\n\
HL: {:#06X}\n\
SP: {:#06X}\n\
PC: {:#06X}\n\
Zero: {}\n\
Subtract: {}\n\
HalfCarry: {}\n\
Carry: {}",
af, bc, de, hl,
self.SP, self.PC, self.zero(),
self.subtract(), self.half_carry(), self.carry()
)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn combined_regs() {
let mut regs = RegisterFile::new(true);
regs.write(Reg8::A, 0x10);
regs.write(Reg8::F, 0xFF);
assert_eq!(regs.read(Reg16::AF), 0x10F0);
regs.write(Reg16::BC, 0xBEEF);
assert_eq!(regs.read(Reg8::B), 0xBE);
assert_eq!(regs.read(Reg8::C), 0xEF);
}
#[test]
fn flags() {
let mut registers = RegisterFile::new(true);
registers.set(Flag::Zero, true);
assert!(registers.zero());
assert_eq!(registers.F, 1 << 7);
registers.set(Flag::Carry, true);
assert!(registers.carry());
assert_eq!(registers.F, 1 << 7 | 1 << 4);
registers.clear(Flag::Zero);
assert!(!registers.zero());
assert_eq!(registers.F, 1 << 4);
}
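    // Added sketch: 16-bit reads and writes of PC and SP also go through RegisterOps.
    #[test]
    fn pc_and_sp() {
        let mut regs = RegisterFile::new(false);
        assert_eq!(regs.read(Reg16::PC), 0x0100);
        regs.write(Reg16::SP, 0xC000);
        assert_eq!(regs.read(Reg16::SP), 0xC000);
    }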
}
| true
|
918df43a07e5a71dbe6628a816e592727d593d18
|
Rust
|
fukatani/rust-ant-book
|
/src/lcs.rs
|
UTF-8
| 1,032
| 3.09375
| 3
|
[] |
no_license
|
fn read<T: std::str::FromStr>() -> T {
let mut s = String::new();
std::io::stdin().read_line(&mut s).ok();
s.trim().parse().ok().unwrap()
}
fn main() {
let s: String = read();
let t: String = read();
let s = s.chars().collect::<Vec<_>>();
let t = t.chars().collect::<Vec<_>>();
let mut dp = vec![vec![0; t.len() + 1]; s.len() + 1];
for i in 0..s.len() {
for j in 0..t.len() {
if s[i] == t[j] {
dp[i + 1][j + 1] = dp[i][j] + 1;
} else {
dp[i + 1][j + 1] = std::cmp::max(dp[i][j + 1], dp[i + 1][j]);
}
}
}
// println!("{:?}", dp);
let mut ans: Vec<char> = Vec::new();
let mut i = s.len();
let mut j = t.len();
    // Walk the table backwards: follow character matches, otherwise step
    // towards the larger neighbouring subproblem. Guarding on both `i` and `j`
    // avoids an unsigned underflow when one string is exhausted first.
    while i > 0 && j > 0 {
        if s[i - 1] == t[j - 1] {
            ans.push(s[i - 1]);
            i -= 1;
            j -= 1;
        } else if dp[i - 1][j] >= dp[i][j - 1] {
            i -= 1;
        } else {
            j -= 1;
        }
    }
ans.reverse();
for ch in ans {
print!("{}", ch);
}
println!("");
}
| true
|
4b7f5264a83f8df2502ec4eafe4e14a3b987426c
|
Rust
|
paritytech/substrate
|
/client/cli/src/commands/sign.rs
|
UTF-8
| 3,594
| 2.8125
| 3
|
[
"GPL-3.0-or-later",
"Classpath-exception-2.0",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
permissive
|
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Implementation of the `sign` subcommand
use crate::{
error, params::MessageParams, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams,
};
use array_bytes::bytes2hex;
use clap::Parser;
use sp_core::crypto::SecretString;
use std::io::{BufRead, Write};
/// The `sign` command
#[derive(Debug, Clone, Parser)]
#[command(name = "sign", about = "Sign a message, with a given (secret) key")]
pub struct SignCmd {
/// The secret key URI.
/// If the value is a file, the file content is used as URI.
/// If not given, you will be prompted for the URI.
#[arg(long)]
suri: Option<String>,
#[allow(missing_docs)]
#[clap(flatten)]
pub message_params: MessageParams,
#[allow(missing_docs)]
#[clap(flatten)]
pub keystore_params: KeystoreParams,
#[allow(missing_docs)]
#[clap(flatten)]
pub crypto_scheme: CryptoSchemeFlag,
}
impl SignCmd {
/// Run the command
pub fn run(&self) -> error::Result<()> {
let sig = self.sign(|| std::io::stdin().lock())?;
std::io::stdout().lock().write_all(sig.as_bytes())?;
Ok(())
}
/// Sign a message.
///
/// The message can either be provided as immediate argument via CLI or otherwise read from the
/// reader created by `create_reader`. The reader will only be created in case that the message
/// is not passed as immediate.
pub(crate) fn sign<F, R>(&self, create_reader: F) -> error::Result<String>
where
R: BufRead,
F: FnOnce() -> R,
{
let message = self.message_params.message_from(create_reader)?;
let suri = utils::read_uri(self.suri.as_ref())?;
let password = self.keystore_params.read_password()?;
with_crypto_scheme!(self.crypto_scheme.scheme, sign(&suri, password, message))
}
}
fn sign<P: sp_core::Pair>(
suri: &str,
password: Option<SecretString>,
message: Vec<u8>,
) -> error::Result<String> {
let pair = utils::pair_from_suri::<P>(suri, password)?;
Ok(bytes2hex("0x", pair.sign(&message).as_ref()))
}
#[cfg(test)]
mod test {
use super::*;
const SEED: &str = "0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a";
#[test]
fn sign_arg() {
let cmd = SignCmd::parse_from(&[
"sign",
"--suri",
&SEED,
"--message",
&SEED,
"--password",
"12345",
"--hex",
]);
let sig = cmd.sign(|| std::io::stdin().lock()).expect("Must sign");
assert!(sig.starts_with("0x"), "Signature must start with 0x");
assert!(array_bytes::hex2bytes(&sig).is_ok(), "Signature is valid hex");
}
#[test]
fn sign_stdin() {
let cmd = SignCmd::parse_from(&[
"sign",
"--suri",
SEED,
"--message",
&SEED,
"--password",
"12345",
]);
let sig = cmd.sign(|| SEED.as_bytes()).expect("Must sign");
assert!(sig.starts_with("0x"), "Signature must start with 0x");
assert!(array_bytes::hex2bytes(&sig).is_ok(), "Signature is valid hex");
}
}
| true
|
bdf2d26ad1e70b57e90be5a5c1287cb94486eef9
|
Rust
|
ToruNiina/trajan
|
/src/snapshot.rs
|
UTF-8
| 1,372
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
//! A module to handle snapshots stored in any kind of format in the same way.
//!
//! It requires that a Snapshot be indexable and that the Output of Index
//! implement the trajan::particle::Particle trait.
//!
//! Through this, any `SomeSnapshot` type can be used in the same way.
use crate::particle::{Attribute, Particle};
use std::option::Option;
/// A trait to provide the same accessibility to any kind of snapshots.
pub trait Snapshot<T>: std::ops::Index<usize>
where
T: nalgebra::Scalar,
<Self as std::ops::Index<usize>>::Output: Particle<T>,
{
/// precision of the value (e.g. f32 or f64).
type Value;
/// returns how many particles are contained in the snapshot.
fn len(&self) -> usize;
/// Collects mass of each particle if it exists.
fn masses(&self) -> Option<std::vec::Vec<T>>;
/// Collects positions of each particle if it exists.
fn positions(&self) -> Option<std::vec::Vec<nalgebra::Vector3<T>>>;
/// Collects velocities of each particle if it exists.
fn velocities(&self) -> Option<std::vec::Vec<nalgebra::Vector3<T>>>;
/// Collects forces of each particle if it exists.
fn forces(&self) -> Option<std::vec::Vec<nalgebra::Vector3<T>>>;
/// Collects attributes of each particle if it exists.
fn attributes(&self, name: &str) -> Option<std::vec::Vec<Attribute>>;
}
| true
|
1c274300c636e5c8560f51ce96f79d8a5b8e2ae7
|
Rust
|
seanpianka/cargo-count
|
/src/count/mod.rs
|
UTF-8
| 1,960
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
mod counts;
pub use self::counts::Counts;
use fmt;
use language::Language;
use std::fmt as StdFmt;
use std::ops::Deref;
use std::path::PathBuf;
#[derive(Debug)]
pub struct Count {
pub lang: Language,
pub files: Vec<PathBuf>,
pub code: u64,
pub comments: u64,
pub tests: u64,
pub blanks: u64,
pub lines: u64,
pub usafe: u64,
pub sep: Option<char>,
}
impl Count {
pub fn new(lang: Language, sep: Option<char>) -> Self {
        Count {
            lang,
            files: vec![],
            code: 0,
            comments: 0,
            tests: 0,
            blanks: 0,
            lines: 0,
            usafe: 0,
            sep,
        }
}
pub fn add_file(&mut self, f: PathBuf) {
self.files.push(f);
}
pub fn lines(&self) -> String {
fmt::format_number(self.lines, self.sep)
}
pub fn code(&self) -> String {
fmt::format_number(self.code, self.sep)
}
pub fn blanks(&self) -> String {
fmt::format_number(self.blanks, self.sep)
}
pub fn usafe(&self) -> String {
fmt::format_number(self.usafe, self.sep)
}
pub fn comments(&self) -> String {
fmt::format_number(self.comments, self.sep)
}
pub fn tests(&self) -> String {
fmt::format_number(self.tests, self.sep)
}
pub fn total_files(&self) -> String {
fmt::format_number(self.files.len() as u64, self.sep)
}
}
impl Deref for Count {
type Target = Language;
fn deref(&self) -> &<Self as Deref>::Target {
&self.lang
}
}
impl StdFmt::Display for Count {
fn fmt(&self, f: &mut StdFmt::Formatter) -> StdFmt::Result {
write!(
f,
"{}\t{}\t{}\t{}\t{}\t{}\t{}",
self.lang,
self.total_files(),
self.lines(),
self.blanks(),
self.comments(),
self.code(),
self.tests(),
)
}
}
| true
|
152125704f9d93628b404b7f126fbcb05b50e506
|
Rust
|
serverlesstechnology/cqrs
|
/src/persist/upcaster.rs
|
UTF-8
| 11,402
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
use serde_json::Value;
use std::fmt::{Display, Formatter};
use std::num::ParseIntError;
use std::str::FromStr;
use crate::persist::SerializedEvent;
/// Used to upcast an event from an older type or version to the current form. This is needed
/// to modify the structure of events whose older versions have already been persisted.
pub trait EventUpcaster: Send + Sync {
    /// Examines an event type and version to determine whether the event should be upcast.
fn can_upcast(&self, event_type: &str, event_version: &str) -> bool;
    /// Modifies the serialized event to conform to the new structure.
fn upcast(&self, event: SerializedEvent) -> SerializedEvent;
}
/// A helper type for creating the upcaster function for a `SemanticVersionEventUpcaster`.
pub type SemanticVersionEventUpcasterFunc = dyn Fn(Value) -> Value + Send + Sync;
/// A representation of a semantic version used in a `SemanticVersionEventUpcaster`.
#[derive(Debug, PartialOrd, PartialEq, Eq)]
pub struct SemanticVersion {
major_version: u32,
minor_version: u32,
patch: u32,
}
impl SemanticVersion {
/// Identifies if one `SemanticVersion` supersedes another. Used to determine whether an
/// upcaster function should be applied.
///
/// E.g.,
/// - for upcaster v0.2.3 with code v0.2.2 --> upcaster is applied
/// - for upcaster v0.2.2 with code v0.2.2 --> upcaster is _not_ applied
pub fn supersedes(&self, other: &Self) -> bool {
if other.major_version < self.major_version {
return true;
}
if other.major_version == self.major_version {
if other.minor_version < self.minor_version {
return true;
}
if other.minor_version == self.minor_version && other.patch < self.patch {
return true;
}
}
false
}
}
impl Display for SemanticVersion {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}.{}.{}",
self.major_version, self.minor_version, self.patch
)
}
}
impl FromStr for SemanticVersion {
type Err = SemanticVersionError;
fn from_str(event_version: &str) -> Result<Self, Self::Err> {
let mut split_version = event_version.split('.').fuse();
let major_version = u32::from_str(split_version.next().unwrap())?;
let minor_version = split_version.next().map_or(Ok(0), u32::from_str)?;
let patch = split_version.next().map_or(Ok(0), u32::from_str)?;
Ok(Self {
major_version,
minor_version,
patch,
})
}
}
#[derive(Debug, PartialOrd, PartialEq, Eq)]
/// Type can not be converted to a `Semantic Version`.
pub struct SemanticVersionError;
impl From<ParseIntError> for SemanticVersionError {
fn from(_: ParseIntError) -> Self {
Self
}
}
/// This upcasts any event that has the same `event_type` and an `event_version` that is less than the
/// version configured on the upcaster.
///
/// ```
/// use cqrs_es::persist::{EventUpcaster,SemanticVersionEventUpcaster};
/// use serde_json::Value;
/// use cqrs_es::persist::SerializedEvent;
///
/// let upcast_function = Box::new(|payload: Value| match payload {
/// Value::Object(mut object_map) => {
/// object_map.insert("country".to_string(), "USA".into());
/// Value::Object(object_map)
/// }
/// _ => {
/// panic!("the event payload is not an object")
/// }
/// });
/// let upcaster = SemanticVersionEventUpcaster::new("EventX", "2.3.4", upcast_function);
///
/// let payload: Value = serde_json::from_str(
/// r#"{
/// "zip code": 98103,
/// "state": "Washington"
/// }"#,
/// ).unwrap();
/// let event = SerializedEvent::new(
/// String::new(),
/// 0,
/// String::new(),
/// String::new(),
/// String::new(),
/// payload,
/// Default::default(),
/// );
/// let upcasted_event = upcaster.upcast(event);
///
/// let expected_payload: Value = serde_json::from_str(
/// r#"{
/// "zip code": 98103,
/// "state": "Washington",
/// "country": "USA"
/// }"#,
/// ).unwrap();
/// let expected_event = SerializedEvent::new(
/// String::new(),
/// 0,
/// String::new(),
/// String::new(),
/// "2.3.4".to_string(),
/// expected_payload,
/// Default::default(),
/// );
///
/// assert_eq!(upcasted_event, expected_event);
/// ```
pub struct SemanticVersionEventUpcaster {
event_type: String,
event_version: SemanticVersion,
f: Box<SemanticVersionEventUpcasterFunc>,
}
impl SemanticVersionEventUpcaster {
/// Creates a `SemanticVersionEventUpcaster`
pub fn new(
event_type: &str,
event_version: &str,
f: Box<SemanticVersionEventUpcasterFunc>,
) -> Self {
let event_version: SemanticVersion = SemanticVersion::from_str(event_version)
.expect("event_version is not a valid semantic version");
Self {
event_type: event_type.to_string(),
event_version,
f,
}
}
}
impl EventUpcaster for SemanticVersionEventUpcaster {
fn can_upcast(&self, event_type: &str, event_version: &str) -> bool {
if event_type != self.event_type {
return false;
}
let event_version = match SemanticVersion::from_str(event_version) {
Ok(result) => result,
Err(_) => {
return false;
}
};
self.event_version.supersedes(&event_version)
}
fn upcast(&self, event: SerializedEvent) -> SerializedEvent {
let upcasted_payload = (self.f)(event.payload);
SerializedEvent {
aggregate_id: event.aggregate_id,
sequence: event.sequence,
aggregate_type: event.aggregate_type,
event_type: event.event_type,
event_version: self.event_version.to_string(),
payload: upcasted_payload,
metadata: event.metadata,
}
}
}
#[cfg(test)]
mod test {
use std::str::FromStr;
use crate::persist::SerializedEvent;
use serde_json::json;
use serde_json::Value;
use crate::persist::SemanticVersionEventUpcasterFunc;
use crate::persist::{
EventUpcaster, SemanticVersion, SemanticVersionError, SemanticVersionEventUpcaster,
};
fn semantic_version(major_version: u32, minor_version: u32, patch: u32) -> SemanticVersion {
SemanticVersion {
major_version,
minor_version,
patch,
}
}
#[test]
fn parse_version() {
assert_eq!(
semantic_version(2, 0, 0),
SemanticVersion::from_str("2").unwrap()
);
assert_eq!(
semantic_version(2, 3, 0),
SemanticVersion::from_str("2.3").unwrap()
);
assert_eq!(
semantic_version(2, 3, 4),
SemanticVersion::from_str("2.3.4").unwrap()
);
assert_eq!(
semantic_version(2, 3, 4),
SemanticVersion::from_str("2.3.4.5").unwrap()
);
}
#[test]
fn parse_version_invalid() {
assert_eq!(
Err(SemanticVersionError),
SemanticVersion::from_str("not_a_version")
);
assert_eq!(Err(SemanticVersionError), SemanticVersion::from_str(""));
assert_eq!(Err(SemanticVersionError), SemanticVersion::from_str("2."));
assert_eq!(Err(SemanticVersionError), SemanticVersion::from_str("2.3."));
}
#[test]
fn simple_upcaster_can_upcast() {
let upcaster =
SemanticVersionEventUpcaster::new("EventX", "2.3.4", Box::new(|event| event));
assert!(upcaster.can_upcast("EventX", "1.12.35"));
assert!(upcaster.can_upcast("EventX", "2.3.3"));
assert!(!upcaster.can_upcast("AnotherEvent", "1.12.35"));
assert!(!upcaster.can_upcast("EventX", "2.3.4"));
assert!(!upcaster.can_upcast("EventX", "2.3.5"));
assert!(!upcaster.can_upcast("EventX", "2.4.0"));
assert!(!upcaster.can_upcast("EventX", "3.0.0"));
}
#[test]
fn semantic_version_upcaster_can_upcast() {
SemanticVersionEventUpcaster::new("EventX", "2.3.4", test_upcast());
}
#[test]
#[should_panic]
fn semantic_version_upcaster_invalid_version() {
SemanticVersionEventUpcaster::new("EventX", "not_a_version", test_upcast());
}
#[test]
fn semantic_version_upcaster_upcast() {
let upcaster = SemanticVersionEventUpcaster::new("EventX", "2.3.4", test_upcast());
let payload: Value = serde_json::from_str(
r#"{
"id": 4829,
"name": "George Steinbrenner"
}"#,
)
.unwrap();
let event = SerializedEvent::new(
String::new(),
0,
String::new(),
String::new(),
String::new(),
payload,
Value::default(),
);
let upcasted_event = upcaster.upcast(event);
let expected: Value =
serde_json::from_str(r#"{"id":"CUST4829","name":"George Steinbrenner"}"#).unwrap();
assert_eq!(expected, upcasted_event.payload);
}
#[test]
fn semantic_version_upcaster_upcast_for_documentation() {
let upcast_function = Box::new(|payload: Value| {
if let Value::Object(mut object_map) = payload {
object_map.insert("country".to_string(), "USA".into());
Value::Object(object_map)
} else {
panic!("the event payload is not an object")
}
});
let upcaster = SemanticVersionEventUpcaster::new("EventX", "2.3.4", upcast_function);
let payload: Value = serde_json::from_str(
r#"{
"zip code": 98103,
"state": "Washington"
}"#,
)
.unwrap();
let event = SerializedEvent::new(
String::new(),
0,
String::new(),
String::new(),
String::new(),
payload,
Value::default(),
);
let upcasted_event = upcaster.upcast(event);
let expected_payload: Value = serde_json::from_str(
r#"{
"zip code": 98103,
"state": "Washington",
"country": "USA"
}"#,
)
.unwrap();
let expected_event = SerializedEvent::new(
String::new(),
0,
String::new(),
String::new(),
"2.3.4".to_string(),
expected_payload,
Value::default(),
);
assert_eq!(upcasted_event, expected_event);
}
fn test_upcast() -> Box<SemanticVersionEventUpcasterFunc> {
Box::new(|mut payload| {
let current_id = payload.get("id").unwrap().to_string();
let updated_id = format!("CUST{}", current_id);
*payload.get_mut("id").unwrap() = json!(updated_id);
payload
})
}
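    // Added sketch: exercises `SemanticVersion::supersedes` directly, mirroring
    // the examples in its doc comment.
    #[test]
    fn semantic_version_supersedes() {
        assert!(semantic_version(2, 3, 4).supersedes(&semantic_version(2, 3, 3)));
        assert!(semantic_version(2, 3, 4).supersedes(&semantic_version(1, 9, 9)));
        assert!(!semantic_version(2, 3, 4).supersedes(&semantic_version(2, 3, 4)));
        assert!(!semantic_version(2, 3, 4).supersedes(&semantic_version(3, 0, 0)));
    }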
}
| true
|
604a0d1b771b12563fb7aa910d2ab2613e469fb9
|
Rust
|
drewfead/reels
|
/catalog-sv/src/core/action.rs
|
UTF-8
| 3,268
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
use log::{debug, info};
use uuid::Uuid;
use crate::core::{CreateMovieParams, DeleteMovie, IndexMovie, Movie, Page, UpdateMovieParams, HasId};
use crate::core::error::Error;
use crate::db;
use crate::db::DbConnection;
use crate::idx;
use crate::idx::IndexClient;
use either::Either;
pub fn create_movie(conn: &DbConnection, movie: CreateMovieParams) -> Result<Either<HasId, Movie>, Error> {
info!("creating movie {:?}", movie);
db::create_movie(conn, movie.create())
}
pub fn update_movie(conn: &DbConnection, id: Uuid, movie: UpdateMovieParams) -> Result<Option<Movie>, Error> {
info!("updating movie id={} {:?}", id, movie);
db::update_movie(conn, id, movie.update())
}
pub fn delete_movie(conn: &DbConnection, id: Uuid) -> Result<bool, Error> {
debug!("deleting movie id={}", id);
let soft_deleted = db::update_movie(conn, id, DeleteMovie.update())
.map(|opt| opt.is_some())?;
info!("soft deleted movie id={}", id);
Ok(soft_deleted)
}
pub fn delete_soft_deleted(conn: &DbConnection) -> Result<usize, Error> {
debug!("deleting movies that have been soft deleted");
let deleted = db::delete_soft_deleted(conn)?;
if deleted > 0 {
info!("deleted {} movies", deleted);
}
Ok(deleted)
}
pub fn find_one_movie(conn: &DbConnection, id: Uuid) -> Result<Option<Movie>, Error> {
info!("finding movie id={}", id);
db::find_one_movie(conn, id)
}
pub fn find_movies(conn: &DbConnection, count: i64, anchor: &Option<String>) -> Result<Page<Movie>, Error> {
info!("finding movies count={:?} anchor={:?}", count, anchor);
db::find_movies(conn, count, anchor)
}
pub fn find_movies_with_ids(conn: &DbConnection, ids: Vec<Uuid>) -> Result<Page<Movie>, Error> {
if ids.is_empty() {
return Ok(Page {
page_number: 1,
next_anchor: None,
items: Vec::new(),
})
}
info!("finding movies with ids in {:?}", ids);
db::find_movies_with_ids(conn, ids)
}
pub async fn search_movies(
client: &IndexClient,
search_term: &String,
count: i64,
anchor: &Option<String>
) -> Result<Page<Either<HasId, Movie>>, Error> {
info!("searching movies search_term={} count={:?} anchor={:?}", search_term, count, anchor);
idx::search_movies(client, search_term, count, anchor).await
}
pub async fn create_index(client: &IndexClient) -> Result<bool, Error> {
info!("creating catalog index");
idx::create_index(client).await
}
pub fn find_movies_to_index(conn: &DbConnection, count: i64) -> Result<Vec<Movie>, Error> {
debug!("finding movies to index count={:?}", count);
let stale = db::find_stale_indexed(conn, count)?;
if !stale.is_empty() {
info!("found stale movies to index count={:?}", stale.len())
}
Ok(stale)
}
pub async fn index_movies(client: &IndexClient, movies: Vec<Movie>) -> Result<Vec<Movie>, Error> {
info!("adding movies to catalog index {:?}", movies);
idx::index_movies(client, movies).await
}
pub fn mark_movies_indexed(conn: &DbConnection, movies: Vec<Movie>) -> Result<Vec<Movie>, Error> {
debug!("marking movies indexed {:?}", movies);
db::update_movies(
conn,
movies.iter().map(|m|m.id).collect(),
IndexMovie.update()
)
}
| true
|
8eed30f78986a7911119a6f2ba6ab1d3602ee580
|
Rust
|
mesalock-linux/crates-io
|
/vendor/rocket/src/request/form/from_form.rs
|
UTF-8
| 4,425
| 3.765625
| 4
|
[
"Apache-2.0",
"Unlicense",
"BSD-3-Clause",
"0BSD",
"MIT"
] |
permissive
|
use crate::request::FormItems;
/// Trait to create an instance of some type from an HTTP form.
/// [`Form`](crate::request::Form) requires its generic type to implement this trait.
///
/// # Deriving
///
/// This trait can be automatically derived. When deriving `FromForm`, every
/// field in the structure must implement
/// [`FromFormValue`](crate::request::FromFormValue). Rocket validates each field in
/// the structure by calling its `FromFormValue` implementation. You may wish to
/// implement `FromFormValue` for your own types for custom, automatic
/// validation.
///
/// ```rust
/// # #![feature(proc_macro_hygiene)]
/// # #![allow(deprecated, dead_code, unused_attributes)]
/// # #[macro_use] extern crate rocket;
/// #[derive(FromForm)]
/// struct TodoTask {
/// description: String,
/// completed: bool
/// }
/// # fn main() { }
/// ```
///
/// # Data Guard
///
/// Types that implement `FromForm` can be parsed directly from incoming form
/// data via the `data` parameter and `Form` type.
///
/// ```rust
/// # #![feature(proc_macro_hygiene)]
/// # #![allow(deprecated, dead_code, unused_attributes)]
/// # #[macro_use] extern crate rocket;
/// # use rocket::request::Form;
/// # #[derive(FromForm)]
/// # struct TodoTask { description: String, completed: bool }
/// #[post("/submit", data = "<task>")]
/// fn submit_task(task: Form<TodoTask>) -> String {
/// format!("New task: {}", task.description)
/// }
/// # fn main() { }
/// ```
///
/// # Implementing
///
/// Implementing `FromForm` should be a rare occurrence. Prefer instead to use
/// Rocket's built-in derivation.
///
/// When implementing `FromForm`, use the [`FormItems`] iterator to iterate
/// through the raw form key/value pairs. Be aware that form fields that are
/// typically hidden from your application, such as `_method`, will be present
/// while iterating. Ensure that you adhere to the properties of the `strict`
/// parameter, as detailed in the documentation below.
///
/// ## Example
///
/// Consider the following scenario: we have a struct `Item` with field name
/// `field`. We'd like to parse any form that has a field named either `balloon`
/// _or_ `space`, and we'd like that field's value to be the value for our
/// structure's `field`. The following snippet shows how this would be
/// implemented:
///
/// ```rust
/// use rocket::request::{FromForm, FormItems};
///
/// struct Item {
/// field: String
/// }
///
/// impl<'f> FromForm<'f> for Item {
/// // In practice, we'd use a more descriptive error type.
/// type Error = ();
///
/// fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<Item, ()> {
/// let mut field = None;
///
/// for item in items {
/// match item.key.as_str() {
/// "balloon" | "space" if field.is_none() => {
/// let decoded = item.value.url_decode().map_err(|_| ())?;
/// field = Some(decoded);
/// }
/// _ if strict => return Err(()),
/// _ => { /* allow extra value when not strict */ }
/// }
/// }
///
/// field.map(|field| Item { field }).ok_or(())
/// }
/// }
/// ```
pub trait FromForm<'f>: Sized {
/// The associated error to be returned when parsing fails.
type Error;
/// Parses an instance of `Self` from the iterator of form items `it`.
///
    /// Extra form fields are allowed when `strict` is `false` and disallowed
/// when `strict` is `true`.
///
/// # Errors
///
/// If `Self` cannot be parsed from the given form items, an instance of
/// `Self::Error` will be returned.
///
/// When `strict` is `true` and unexpected, extra fields are present in
/// `it`, an instance of `Self::Error` will be returned.
fn from_form(it: &mut FormItems<'f>, strict: bool) -> Result<Self, Self::Error>;
}
impl<'f, T: FromForm<'f>> FromForm<'f> for Option<T> {
type Error = std::convert::Infallible;
#[inline]
fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<Option<T>, Self::Error> {
Ok(T::from_form(items, strict).ok())
}
}
impl<'f, T: FromForm<'f>> FromForm<'f> for Result<T, T::Error> {
type Error = std::convert::Infallible;
#[inline]
fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<Self, Self::Error> {
Ok(T::from_form(items, strict))
}
}
| true
|
3708a89c6a62a2bc65316746335a17a43bf487db
|
Rust
|
hummans/feeless
|
/src/version.rs
|
UTF-8
| 692
| 3.03125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
use crate::Error;
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
/// Network version of a node.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum Version {
V18 = 18,
V19 = 19,
}
impl Display for Version {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Strip the leading `V` from the derived Debug name (e.g. "V18" -> "18");
        // calling `self.to_string()` here would recurse back into this Display impl.
        write!(f, "{}", &format!("{:?}", self)[1..])
}
}
impl FromStr for Version {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"18" => Version::V18,
"19" => Version::V19,
v => return Err(Error::InvalidVersion(v.into())),
})
}
}
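// A small added check for the Display/FromStr pair; `Display` is expected to
// print the bare protocol number without the leading `V`.
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn display_and_parse() {
        assert_eq!(Version::V18.to_string(), "18");
        assert!(matches!(Version::from_str("19"), Ok(Version::V19)));
        assert!(Version::from_str("nineteen").is_err());
    }
}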
| true
|
99c9b78456c0b513dd4d3bddbf77538d5f55fc6e
|
Rust
|
delneg/bitcoin-address-generator-api
|
/jelly/src/request/flash.rs
|
UTF-8
| 1,585
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
use actix_web::HttpRequest;
use actix_session::UserSession;
use crate::templates::FlashMessage;
use crate::error::Error;
/// `FlashMessages` implements a one-time-message (hence "Flash") that is useful
/// for old-school HTML flows that need to display messages in a standardized way
/// across pages.
///
/// This could potentially do less serialization, but it's fine for now.
/// TODO: Look at whether this can be done with just &str rather than String.
pub trait FlashMessages {
/// Adds a flash message to the stack.
fn flash(&self, title: &str, message: &str) -> Result<(), Error>;
/// Internally used; loads flash messages for template use and removes the existing
/// stack.
fn get_flash_messages(&self) -> Result<Vec<FlashMessage>, Error>;
}
impl FlashMessages for HttpRequest {
fn flash(&self, title: &str, message: &str) -> Result<(), Error> {
let session = self.get_session();
let mut messages: Vec<FlashMessage> = match session.get("flsh")? {
Some(messages) => messages,
None => Vec::new()
};
messages.push(FlashMessage {
title: title.to_string(),
message: message.to_string()
});
session.set("flsh", messages)?;
Ok(())
}
fn get_flash_messages(&self) -> Result<Vec<FlashMessage>, Error> {
let session = self.get_session();
let messages = match session.get("flsh")? {
Some(messages) => messages,
None => Vec::new()
};
session.remove("flsh");
Ok(messages)
}
}
| true
|
d1c2c39157a2ac5f64e042735b39cbe6a0fc3ec3
|
Rust
|
diegopy/fluvio
|
/src/extension-consumer/src/produce/mod.rs
|
UTF-8
| 4,255
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
use std::fs::File;
use std::io::{BufReader, BufRead};
use std::path::PathBuf;
use structopt::StructOpt;
use fluvio::{Fluvio, TopicProducer};
use fluvio_types::print_cli_ok;
use crate::common::FluvioExtensionMetadata;
use crate::Result;
/// Produce log configuration parameters
#[derive(Debug)]
pub struct ProduceLogConfig {
pub topic: String,
pub partition: i32,
pub continuous: bool,
}
#[derive(Debug)]
pub enum FileRecord {
Lines(PathBuf),
Files(Vec<PathBuf>),
}
// -----------------------------------
// CLI Options
// -----------------------------------
#[derive(Debug, StructOpt)]
pub struct ProduceLogOpt {
/// The name of the Topic to produce to
#[structopt(value_name = "topic")]
pub topic: String,
/// The ID of the Partition to produce to
#[structopt(
short = "p",
long = "partition",
value_name = "integer",
default_value = "0"
)]
pub partition: i32,
/// Send messages in an infinite loop
#[structopt(short = "C", long = "continuous")]
pub continuous: bool,
/// Send each line of the file as its own Record
#[structopt(
short = "l",
long = "record-per-line",
value_name = "filename",
parse(from_os_str)
)]
record_per_line: Option<PathBuf>,
/// Send an entire file as a single Record
#[structopt(
short = "r",
long = "record-file",
value_name = "filename",
parse(from_os_str),
conflicts_with = "record-per-line"
)]
record_file: Vec<PathBuf>,
}
impl ProduceLogOpt {
pub async fn process(self, fluvio: &Fluvio) -> Result<()> {
let (cfg, file_records) = self.validate()?;
let mut producer = fluvio.topic_producer(&cfg.topic).await?;
if let Some(records) = file_records {
produce_from_files(&mut producer, cfg, records).await?;
} else {
produce_stdin(&mut producer, cfg).await?;
}
Ok(())
}
/// Validate cli options. Generate target-server and produce log configuration.
pub fn validate(self) -> Result<(ProduceLogConfig, Option<FileRecord>)> {
let file_records = if let Some(record_per_line) = self.record_per_line {
Some(FileRecord::Lines(record_per_line))
} else if !self.record_file.is_empty() {
Some(FileRecord::Files(self.record_file))
} else {
None
};
let produce_log_cfg = ProduceLogConfig {
topic: self.topic,
partition: self.partition,
continuous: self.continuous,
};
Ok((produce_log_cfg, file_records))
}
pub fn metadata() -> FluvioExtensionMetadata {
FluvioExtensionMetadata {
command: "produce".into(),
description: "Produce new data in a stream".into(),
version: env!("CARGO_PKG_VERSION").into(),
}
}
}
/// Sends records to a Topic based on the file configuration given
///
/// This will either send the lines of a single file as individual records,
/// or it will send the entirety of a list of files as records, where each
/// whole file is one record.
async fn produce_from_files(
producer: &mut TopicProducer,
cfg: ProduceLogConfig,
records: FileRecord,
) -> Result<()> {
match records {
FileRecord::Files(paths) => {
for path in paths {
let bytes = std::fs::read(&path)?;
producer.send_record(&bytes, cfg.partition).await?;
print_cli_ok!();
}
}
FileRecord::Lines(path) => {
let file = File::open(&path)?;
let mut lines = BufReader::new(file).lines();
while let Some(Ok(line)) = lines.next() {
producer.send_record(&line, cfg.partition).await?;
}
print_cli_ok!();
}
}
Ok(())
}
/// Sends each line of stdin as a record
async fn produce_stdin(producer: &mut TopicProducer, cfg: ProduceLogConfig) -> Result<()> {
let mut stdin_lines = BufReader::new(std::io::stdin()).lines();
while let Some(Ok(line)) = stdin_lines.next() {
producer.send_record(&line, cfg.partition).await?;
print_cli_ok!();
}
Ok(())
}
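// A small added sketch (assumption: structopt's derived `from_iter` constructor)
// showing that `validate` passes the topic through and reports no file records
// when neither `--record-per-line` nor `--record-file` is given.
#[cfg(test)]
mod tests {
    use super::*;
    use structopt::StructOpt;

    #[test]
    fn validate_plain_topic() {
        let opt = ProduceLogOpt::from_iter(vec!["produce", "my-topic"]);
        let (cfg, files) = match opt.validate() {
            Ok(parts) => parts,
            Err(_) => panic!("validate should succeed for a plain topic"),
        };
        assert_eq!(cfg.topic, "my-topic");
        assert_eq!(cfg.partition, 0);
        assert!(!cfg.continuous);
        assert!(files.is_none());
    }
}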
| true
|
c941dda8f506c394af12e256c32c14b35242fe38
|
Rust
|
docknetwork/jsonrpc-proxy
|
/plugins/upstream/src/helpers.rs
|
UTF-8
| 2,722
| 2.921875
| 3
|
[] |
no_license
|
//! Request parsing helper methods.
use pubsub;
use rpc;
use serde_json;
/// Attempt to peek subscription id from the request given as bytes.
///
/// TODO [ToDr] The implementation should deserialize only subscriptionId part,
/// not the entire `Notification`
pub fn peek_subscription_id(bytes: &[u8]) -> Option<pubsub::SubscriptionId> {
serde_json::from_slice::<rpc::Notification>(bytes)
.ok()
.and_then(|notification| {
if let rpc::Params::Map(ref map) = notification.params {
map.get("subscription").and_then(|v| pubsub::SubscriptionId::parse_value(v))
} else {
None
}
})
}
/// Attempt to peek the result of a successful call.
///
/// TODO [ToDr] The implementation should deserialize only result part,
/// not the entire `rpc::Success`
pub fn peek_result(bytes: &[u8]) -> Option<rpc::Value> {
serde_json::from_slice::<rpc::Success>(bytes)
.ok()
.map(|res| res.result)
}
/// Attempt to peek the id of a call.
///
/// TODO [ToDr] The implementation should deserialize only id part,
/// not the entire `rpc::Call`
pub fn peek_id(bytes: &[u8]) -> Option<rpc::Id> {
serde_json::from_slice::<rpc::Call>(bytes)
.ok()
.and_then(|call| get_id(&call).cloned())
}
/// Extract method name of given call.
pub fn get_method_name(call: &rpc::Call) -> Option<&str> {
match *call {
rpc::Call::MethodCall(rpc::MethodCall { ref method, .. }) => Some(method),
rpc::Call::Notification(rpc::Notification { ref method, .. }) => Some(method),
rpc::Call::Invalid { .. } => None,
}
}
/// Get id of given call.
pub fn get_id(call: &rpc::Call) -> Option<&rpc::Id> {
match *call {
rpc::Call::MethodCall(rpc::MethodCall { ref id, .. }) => Some(id),
rpc::Call::Notification(_) => None,
rpc::Call::Invalid { ref id, .. } => Some(id),
}
}
/// Extract the first parameter of a call and parse it as subscription id.
pub fn get_unsubscribe_id(call: &rpc::Call) -> Option<pubsub::SubscriptionId> {
match *call {
rpc::Call::MethodCall(rpc::MethodCall { ref params, .. }) |
rpc::Call::Notification(rpc::Notification { ref params, .. }) => match params {
rpc::Params::Array(ref vec) if !vec.is_empty() => {
pubsub::SubscriptionId::parse_value(&vec[0])
},
_ => {
warn!("Invalid unsubscribe params: {:?}. Perhaps it's not really an unsubscribe call?", call);
None
},
},
_ => {
warn!("Invalid unsubscribe payload: {:?}. Perhaps it's not really an unsubscribe call?", call);
None
},
}
}
| true
|