Allow unsigned or signed input to frac macro

Timothy Warren 2020-02-18 16:38:26 -05:00
parent 58bddf6206
commit caeb1879c4
2 changed files with 128 additions and 91 deletions
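For orientation, this is the behaviour the commit is after, sketched in the style of the updated tests at the bottom of the diff (a bare literal such as `1` is a signed `i32`, a suffixed one such as `1u32` is unsigned; both now build the same unsigned-backed `Frac`):

```rust
// Sketch only; assumes it sits inside the crate's own test module,
// where `frac!` is in scope, just like the tests further down.
#[test]
fn signed_and_unsigned_literals() {
    // Signed (default i32) and unsigned literals produce equal fractions.
    assert_eq!(frac!(1 / 3), frac!(1u32 / 3));

    // Whole numbers and mixed numbers keep working as documented.
    assert_eq!(frac!(3 / 2), frac!(1 1/2));
}
```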

@@ -7,6 +7,32 @@ use core::ops::{
Mul, MulAssign, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Sign {
/// Greater than zero, or zero
Positive,
/// Less than zero
Negative,
}
impl Default for Sign {
fn default() -> Self {
Sign::Positive
}
}
impl Not for Sign {
type Output = Sign;
fn not(self) -> Self::Output {
match self {
Self::Positive => Self::Negative,
Self::Negative => Self::Positive,
}
}
}
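The `Not` impl is what lets later code flip a sign with `!` instead of matching on the variants; a tiny sketch:

```rust
// Sketch: flipping a sign with `!`, as the `frac` helper below and the
// unary minus used in the tests rely on.
let mut sign = Sign::default();   // Sign::Positive
sign = !sign;                     // Sign::Negative
assert_eq!(!Sign::Negative, Sign::Positive);
```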
/// Native number type
pub trait Num:
Add
@@ -52,6 +78,9 @@ pub trait Int:
+ ShlAssign
+ ShrAssign
{
/// Associated type for unsigned conversion
type Un;
/// The maximum value of the type
fn max_value() -> Self;
@@ -60,6 +89,13 @@ pub trait Int:
/// Is this number less than zero?
fn is_neg(self) -> bool;
/// Convert to an unsigned number
///
/// A no-op when implemented on an unsigned type, but keeping the
/// interface identical for signed and unsigned types simplifies
/// generic code
fn to_unsigned(self) -> Self::Un;
}
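Moving `Un` and `to_unsigned` from `Signed` up to `Int` is what lets a single generic function take either signed or unsigned input, which is exactly what the reworked `frac` helper in the second file does. A minimal sketch of the pattern (the helper name `magnitude` is illustrative, not part of the crate):

```rust
// Sketch: one generic helper covers i8..i128, u8..u128, isize and usize,
// because every `Int` now exposes `Un` and `to_unsigned`.
fn magnitude<T: Int<Un = U>, U: Unsigned>(n: T) -> (Sign, U) {
    let sign = if n.is_neg() { Sign::Negative } else { Sign::Positive };
    // Note: with the `try_from`-based conversion further down, a negative
    // value must be reduced to its absolute value before this call;
    // non-negative and unsigned values convert as-is.
    (sign, n.to_unsigned())
}

// magnitude(3i32) == (Sign::Positive, 3u32)
// magnitude(3u32) == (Sign::Positive, 3u32)
```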
/// A Trait representing unsigned integer primitives
@@ -73,47 +109,19 @@ pub trait Unsigned: Int {
fn is_signed(self) -> bool {
false
}
fn to_unsigned(self) -> Self {
self
}
}
/// A Trait representing signed integer primitives
pub trait Signed: Int {
type Un;
fn to_unsigned(self) -> Self::Un;
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Sign {
/// Greater than zero, or zero
Positive,
/// Less than zero
Negative,
}
impl Default for Sign {
fn default() -> Self {
Sign::Positive
}
}
impl Not for Sign {
type Output = Sign;
fn not(self) -> Self::Output {
match self {
Self::Positive => Self::Negative,
Self::Negative => Self::Positive,
}
}
}
pub trait Signed: Int {}
macro_rules! impl_num {
($( $Type: ty ),* ) => {
$(
impl Num for $Type {
}
impl Num for $Type {}
)*
}
}
@@ -131,19 +139,20 @@ macro_rules! impl_float {
}
macro_rules! impl_int {
($( $Type: ty ),* ) => {
($(($type: ty, $un_type: ty)),* ) => {
$(
impl Int for $Type {
impl Int for $type {
type Un = $un_type;
fn is_zero(self) -> bool {
self == 0
}
fn max_value() -> $Type {
<$Type>::max_value()
fn max_value() -> $type {
<$type>::max_value()
}
/// Is this number less than zero?
fn is_neg(self) -> bool {
if self.is_signed() == false {
false
@@ -151,6 +160,13 @@ macro_rules! impl_int {
self < 0
}
}
fn to_unsigned(self) -> $un_type {
// Converting from signed to unsigned should always be safe
// when using the absolute value, especially since I'm converting
// between the same bit size
<$un_type>::try_from(self).unwrap()
}
}
)*
}
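A note on the generated `try_from` conversion: for a same-width pair such as `i32 -> u32` it succeeds for zero and positive values but returns an `Err` for negative ones, so a negative input has to be reduced to its absolute value before the call, which is what the comment above alludes to. A small illustration of both halves; `abs_to_u32` is an example name, not part of the crate:

```rust
use core::convert::TryFrom;

fn main() {
    // Same-width signed -> unsigned with try_from: fine for values >= 0,
    // an error for anything negative.
    assert_eq!(u32::try_from(3i32).unwrap(), 3u32);
    assert!(u32::try_from(-3i32).is_err());

    // One way to take the absolute value first (illustration only):
    // wrapping_abs + cast also covers i32::MIN, whose magnitude does not
    // fit in i32 but does fit in u32.
    fn abs_to_u32(n: i32) -> u32 {
        n.wrapping_abs() as u32
    }
    assert_eq!(abs_to_u32(-3), 3u32);
    assert_eq!(abs_to_u32(i32::MIN), 2_147_483_648u32);
}
```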
@@ -160,7 +176,8 @@ macro_rules! impl_unsigned {
($($Type: ty),* ) => {
$(
impl Unsigned for $Type {
/// Implementation based on https://en.wikipedia.org/wiki/Binary_GCD_algorithm
/// Implementation based on
/// [https://en.wikipedia.org/wiki/Binary_GCD_algorithm](https://en.wikipedia.org/wiki/Binary_GCD_algorithm)
fn gcd(a: $Type, b: $Type) -> $Type {
if a == b {
return a;
@@ -208,34 +225,31 @@
}
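The hunk above truncates the macro-generated `gcd`, so here is a compact standalone sketch of the binary (Stein's) GCD that the doc comment links to, for orientation only; it is not the crate's exact code:

```rust
/// Binary GCD over one concrete type, for illustration.
fn binary_gcd(mut a: u64, mut b: u64) -> u64 {
    if a == 0 { return b; }
    if b == 0 { return a; }

    // Power of two shared by both inputs.
    let shift = (a | b).trailing_zeros();
    a >>= a.trailing_zeros();          // make a odd

    loop {
        b >>= b.trailing_zeros();      // make b odd
        if a > b {
            core::mem::swap(&mut a, &mut b);
        }
        b -= a;                        // odd - odd is even (or zero)
        if b == 0 {
            return a << shift;         // restore the shared power of two
        }
    }
}

// binary_gcd(6, 9) == 3, which is what Frac::reduce leans on to put
// fractions in lowest terms.
```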
macro_rules! impl_signed {
($(($type: ty, $un_type: ty)),* ) => {
($($type: ty),* ) => {
$(
impl Signed for $type {
type Un = $un_type;
fn to_unsigned(self) -> $un_type {
// Converting from signed to unsigned should always be safe
// when using the absolute value, especially since I'm converting
// between the same bit size
<$un_type>::try_from(self).unwrap()
}
}
impl Signed for $type {}
)*
}
}
impl_num!(i8, u8, i16, u16, f32, i32, u32, f64, i64, u64, i128, u128, isize, usize);
impl_float!(f32, f64);
impl_int!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128, isize, usize);
impl_unsigned!(u8, u16, u32, u64, u128, usize);
impl_signed!(
impl_int!(
(i8, u8),
(u8, u8),
(i16, u16),
(u16, u16),
(i32, u32),
(u32, u32),
(i64, u64),
(u64, u64),
(i128, u128),
(isize, usize)
(u128, u128),
(isize, usize),
(usize, usize)
);
impl_unsigned!(u8, u16, u32, u64, u128, usize);
impl_signed!(i8, i16, i32, i64, i128, isize);
#[cfg(test)]
mod tests {

@@ -1,8 +1,9 @@
//! # Rational Numbers (fractions)
use crate::num::*;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
/// Type representing a fraction
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Frac<T: Unsigned = usize> {
numer: T,
@@ -11,7 +12,20 @@ pub struct Frac<T: Unsigned = usize> {
}
#[macro_export]
/// Create a `Frac` type with signed number literals
/// Create a [Frac](rational/struct.Frac.html) type with signed or unsigned number literals
///
/// Accepts:
///
/// ```no-run
/// // Fractions
/// frac!(1/3);
///
/// // Whole numbers
/// frac!(5u8);
///
/// // Whole numbers and fractions
/// frac!(1 1/2);
/// ```
macro_rules! frac {
($w:literal $n:literal / $d:literal) => {
frac!($w) + frac!($n / $d)
@@ -24,14 +38,16 @@ macro_rules! frac {
};
}
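The first rule above is what makes mixed numbers work: the whole part and the fractional part are each rebuilt with `frac!` and then added, so, in the spirit of the tests below:

```rust
// frac!(1 1/2) expands to frac!(1) + frac!(1 / 2), i.e. 1/1 + 1/2 = 3/2
assert_eq!(frac!(1 1/2), frac!(1) + frac!(1 / 2));
assert_eq!(frac!(1 1/2), frac!(3 / 2));
```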
/// Create a new rational number from unsigned integers
fn frac<S: Signed + Signed<Un = U>, U: Unsigned>(n: S, d: S) -> Frac<U> {
// Converting from signed to unsigned should always be safe
// when using the absolute value, especially since I'm converting
// between the same bit size
#[derive(Debug, Copy, Clone, PartialEq)]
enum FracOp {
Subtraction,
Other,
}
/// Create a new rational number from signed or unsigned integers
#[allow(dead_code)]
fn frac<T: Int + Int<Un = U>, U: Unsigned>(n: T, d: T) -> Frac<U> {
let mut sign = Sign::Positive;
let numer = n.to_unsigned();
let denom = d.to_unsigned();
if n.is_neg() {
sign = !sign;
@@ -41,11 +57,17 @@ fn frac<S: Signed + Signed<Un = U>, U: Unsigned>(n: S, d: S) -> Frac<U> {
sign = !sign;
}
Frac { numer, denom, sign }.reduce()
let numer = n.to_unsigned();
let denom = d.to_unsigned();
Frac::new(numer, denom, sign)
}
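So the free `frac` helper now accepts any `Int`, derives the sign from its inputs, and hands the unsigned magnitudes to `Frac::new`. Roughly, from inside the crate (type annotations added only for clarity):

```rust
// Equivalent ways to build one third; `frac` infers Frac<u32> from the
// i32 arguments via the `Un` associated type.
let via_helper: Frac<u32> = frac(1i32, 3i32);
let via_ctor: Frac<u32> = Frac::new(1u32, 3u32, Sign::Positive);
assert_eq!(via_helper, via_ctor);
```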
impl<T: Unsigned> Frac<T> {
/// Create a new rational number
/// Create a new rational number from unsigned integers and a sign
///
/// Generally, you will probably prefer to use the [frac!](../macro.frac.html) macro
/// instead, as that accepts both signed and unsigned arguments
pub fn new(n: T, d: T, s: Sign) -> Frac<T> {
if d.is_zero() {
panic!("Fraction can not have a zero denominator");
@@ -60,9 +82,13 @@ impl<T: Unsigned> Frac<T> {
}
/// Determine the output sign given the two input signs
fn get_sign(a: Self, b: Self) -> Sign {
fn get_sign(a: Self, b: Self, c: FracOp) -> Sign {
if a.sign != b.sign {
Sign::Negative
if c == FracOp::Subtraction && b.sign == Sign::Negative {
Sign::Positive
} else {
Sign::Negative
}
} else {
Sign::Positive
}
@@ -84,7 +110,7 @@ impl<T: Unsigned + Mul<Output = T>> Mul for Frac<T> {
fn mul(self, rhs: Self) -> Self {
let numer = self.numer * rhs.numer;
let denom = self.denom * rhs.denom;
let sign = Self::get_sign(self, rhs);
let sign = Self::get_sign(self, rhs, FracOp::Other);
Self::new(numer, denom, sign)
}
@@ -102,7 +128,7 @@ impl<T: Unsigned + Mul<Output = T>> Div for Frac<T> {
fn div(self, rhs: Self) -> Self {
let numer = self.numer * rhs.denom;
let denom = self.denom * rhs.numer;
let sign = Self::get_sign(self, rhs);
let sign = Self::get_sign(self, rhs, FracOp::Other);
Self::new(numer, denom, sign)
}
@@ -137,14 +163,14 @@ impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> Add for
// worrying about reducing to the least common denominator
let numer = (a.numer * b.denom) + (b.numer * a.denom);
let denom = a.denom * b.denom;
let sign = Self::get_sign(a, b);
let sign = Self::get_sign(a, b, FracOp::Other);
return Self::new(numer, denom, sign);
}
let numer = a.numer + b.numer;
let denom = self.denom;
let sign = Self::get_sign(a, b);
let sign = Self::get_sign(a, b, FracOp::Other);
Self::new(numer, denom, sign)
}
@@ -160,28 +186,21 @@ impl<T: Unsigned + Sub<Output = T> + Mul<Output = T>> Sub for Frac<T> {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
let a = if self.numer >= rhs.numer {
self
} else {
rhs
};
let b = if self.numer < rhs.numer {
self
} else {
rhs
};
// Set the larger argument as `a`
let a = self;
let b = rhs;
if a.denom != b.denom {
let numer = (a.numer * b.denom) - (b.numer * a.denom);
let denom = a.denom * b.denom;
let sign = Self::get_sign(a, b);
let sign = Self::get_sign(a, b, FracOp::Subtraction);
return Self::new(numer, denom, sign);
}
let numer = a.numer - b.numer;
let denom = a.denom;
let sign = Self::get_sign(a, b);
let sign = Self::get_sign(a, b, FracOp::Subtraction);
Self::new(numer, denom, sign)
}
@@ -210,10 +229,10 @@ mod tests {
#[test]
fn mul_test() {
let frac1 = Frac::new(1u8, 3u8, Sign::Positive);
let frac2 = Frac::new(2u8, 3u8, Sign::Positive);
let frac1 = frac!(1 / 3u8);
let frac2 = frac!(2u8 / 3);
let expected = Frac::new(2u8, 9u8, Sign::Positive);
let expected = frac!(2u8 / 9);
assert_eq!(frac1 * frac2, expected);
}
@@ -221,21 +240,25 @@ mod tests {
#[test]
fn add_test() {
assert_eq!(frac!(5 / 6), frac!(1 / 3) + frac!(1 / 2));
assert_eq!(frac!(1 / 3), frac!(2 / 3) + -frac!(1 / 3), "2/3 + -1/3");
assert_eq!(-frac!(1 / 3), -frac!(2 / 3) + frac!(1 / 3), "-2/3 + 1/3");
}
#[test]
fn sub_test() {
assert_eq!(frac!(1/6), frac!(1 / 2) - frac!(1/3));
assert_eq!(frac!(1 / 6), frac!(1 / 2) - frac!(1 / 3));
// assert_eq!(frac!(1), frac!(1/3) - -frac!(2/3), "1/3 - -2/3");
// assert_eq!(-frac!(1 / 1), -frac!(2 / 3) - frac!(1 / 3), "-2/3 - 1/3");
}
#[test]
fn macro_test() {
let frac1 = frac!(1 / 3);
let frac2 = Frac::new(1u32, 3, Sign::Positive);
let frac2 = frac!(1u32 / 3);
assert_eq!(frac1, frac2);
let frac1 = -frac!(1 / 2);
let frac2 = Frac::new(1u32, 2, Sign::Negative);
let frac2 = -frac!(1u32 / 2);
assert_eq!(frac1, frac2);
assert_eq!(frac!(3 / 2), frac!(1 1/2));