1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
use std::collections::HashMap;
use std::path::Path;
use std::rc::Rc;

use collada::document::ColladaDocument;
use collada;

use math::*;
use skeleton::Skeleton;
use transform::Transform;

/// A single skeletal pose: one local transform per joint, captured at one
/// instant in time.
#[derive(Debug)]
pub struct AnimationSample<T: Transform>
{

    /// Local pose transforms for each joint in the targeted skeleton
    /// (relative to parent joint), indexed in skeleton joint order.
    pub local_poses: Vec<T>,

}

/// A sequence of skeletal pose samples at some sample rate.
///
/// Clip duration is implied by `samples.len() / samples_per_second`.
#[derive(Debug)]
pub struct AnimationClip<T: Transform> {

    /// The sequence of skeletal poses
    pub samples: Vec<AnimationSample<T>>,

    /// Sample rate for the clip. Assumes a constant sample rate.
    pub samples_per_second: f32,

}

/// Definition (deserialized from asset metadata) describing how to load one
/// animation clip from a COLLADA file.
#[derive(Debug, RustcDecodable)]
pub struct AnimationClipDef {
    /// Name the clip will be registered under.
    pub name: String,
    /// Path to the COLLADA (.dae) source file.
    pub source: String,
    /// Duration override in seconds; NaN means "unset" (keep the source's
    /// native duration) — see `AnimationClip::from_def`.
    pub duration: f32,
    /// Rotation (degrees) applied about the Z axis to the root pose;
    /// NaN means "unset" (no rotation) — see `AnimationClip::from_def`.
    pub rotate_z: f32,
}

/// Definition (deserialized from asset metadata) describing a difference clip
/// to synthesize from two already-loaded clips for additive blending.
#[derive(Debug, RustcDecodable)]
pub struct DifferenceClipDef {
    /// Name the resulting difference clip will be registered under.
    pub name: String,
    /// Name of the clip whose motion is being isolated.
    pub source_clip: String,
    /// Name of the clip subtracted out as the reference pose.
    pub reference_clip: String,
}

impl<T: Transform> AnimationClip<T> {

    /// Loads an `AnimationClip` from the COLLADA file named by `clip_def.source`,
    /// applying the optional Z rotation and duration override from the definition.
    ///
    /// # Panics
    ///
    /// Panics if the COLLADA file cannot be opened/parsed or contains no skeletons.
    pub fn from_def(clip_def: &AnimationClipDef) -> AnimationClip<T> {

        // NaN is used as an "unset" sentinel for optional definition fields.
        // Wacky. Shouldn't it be an error if the struct field isn't present?
        // FIXME - use an Option
        let adjust = if !clip_def.rotate_z.is_nan() {
            mat4_rotate_z(clip_def.rotate_z.to_radians())
        } else {
            mat4_id()
        };

        // Parse the COLLADA document and take its first skeleton as the
        // target for this clip.
        // FIXME - load skeleton separately?
        let collada_document = ColladaDocument::from_path(&Path::new(&clip_def.source[..])).unwrap();
        let animations = collada_document.get_animations();
        let skeleton_set = collada_document.get_skeletons().unwrap();
        let skeleton = Skeleton::from_collada(&skeleton_set[0]);

        let mut clip = AnimationClip::from_collada(&skeleton, &animations, &adjust);

        // Duration override: NaN again means "unset", i.e. keep the duration
        // implied by the COLLADA sample times.
        if !clip_def.duration.is_nan() {
            clip.set_duration(clip_def.duration);
        }
        clip

    }

    /// Overrides the sampling rate of the clip so that the full sample
    /// sequence spans the given duration (in seconds).
    pub fn set_duration(&mut self, duration: f32) {
        let sample_count = self.samples.len() as f32;
        self.samples_per_second = sample_count / duration;
    }

    /// Return the duration of the clip in seconds, derived from the sample
    /// count and the clip's constant sample rate.
    pub fn get_duration(&self) -> f32 {
        let sample_count = self.samples.len() as f32;
        sample_count / self.samples_per_second
    }

    /// Obtains the interpolated skeletal pose at the given sampling time.
    ///
    /// # Arguments
    ///
    /// * `time` - The time to sample with, relative to the start of the animation
    /// * `blended_poses` - The output array slice of joint transforms that will be populated
    ///                     for each joint in the skeleton.
    pub fn get_pose_at_time(&self, elapsed_time: f32, blended_poses: &mut [T]) {

        let interpolated_index = elapsed_time * self.samples_per_second;

        let index_1 = interpolated_index.floor() as usize;
        let index_2 = interpolated_index.ceil() as usize;

        let blend_factor = interpolated_index - index_1 as f32;

        let index_1 = index_1 % self.samples.len();
        let index_2 = index_2 % self.samples.len();

        let sample_1 = &self.samples[index_1];
        let sample_2 = &self.samples[index_2];


        for i in (0 .. sample_1.local_poses.len()) {

            let pose_1 = sample_1.local_poses[i];
            let pose_2 = sample_2.local_poses[i];

            let blended_pose = &mut blended_poses[i];
            *blended_pose = pose_1.lerp(pose_2, blend_factor);
        }

    }

    /// Create a difference clip from a source and reference clip for additive blending.
    ///
    /// Each difference pose is `inverse(reference) * source`, computed per joint.
    pub fn as_difference_clip(source_clip: &AnimationClip<T>, reference_clip: &AnimationClip<T>) -> AnimationClip<T> {

        let reference_len = reference_clip.samples.len();

        let samples = source_clip.samples.iter().enumerate().map(|(sample_index, source_sample)| {

            // Extrapolate reference clip by wrapping, if reference clip is shorter than source clip
            let reference_sample = &reference_clip.samples[sample_index % reference_len];

            let difference_poses = source_sample.local_poses.iter()
                .zip(reference_sample.local_poses.iter())
                .map(|(&source_pose, &reference_pose)| {
                    reference_pose.inverse().concat(source_pose)
                })
                .collect();

            AnimationSample {
                local_poses: difference_poses,
            }

        }).collect();

        AnimationClip {
            samples_per_second: source_clip.samples_per_second,
            samples: samples,
        }
    }

    /// Creates an `AnimationClip` from a collection of `collada::Animation`.
    ///
    /// # Arguments
    ///
    /// * `skeleton` - The `Skeleton` that the `AnimationClip` will be created for.
    /// * `animations` - The collection of `collada::Animation`s that will be converted into an
    ///                  `AnimationClip`, using the given `Skeleton`.
    /// * `transform` - An offset transform to apply to the root pose of each animation sample,
    ///                 useful for applying rotation, translation, or scaling when loading an
    ///                 animation.
    ///
    /// # Panics
    ///
    /// Panics if `animations` is empty or if an animation has no sample times.
    pub fn from_collada(skeleton: &Skeleton, animations: &Vec<collada::Animation>, transform: &Matrix4<f32>) -> AnimationClip<T> {
        use std::f32::consts::PI;

        // Z-axis is 'up' in COLLADA, so need to rotate root pose about x-axis so y-axis is 'up'
        // (a 90-degree X rotation, written out with cos/sin of PI/2).
        let rotate_on_x =
            [
                [1.0, 0.0, 0.0, 0.0],
                [0.0, (PI/2.0).cos(), (PI/2.0).sin(), 0.0],
                [0.0, (-PI/2.0).sin(), (PI/2.0).cos(), 0.0],
                [0.0, 0.0, 0.0, 1.0],
            ];

        // Combined root offset: axis fix-up composed with the caller's transform.
        let transform = row_mat4_mul(rotate_on_x, *transform);

        // Build an index of joint names to anims.
        // COLLADA animation targets look like "joint_name/transform"; the
        // joint name is the part before the first '/'.
        let mut joint_animations = HashMap::new();
        for anim in animations.iter() {
            let joint_name = anim.target.split('/').next().unwrap();
            joint_animations.insert(joint_name, anim);
        }

        // Assuming all ColladaAnims have the same number of samples..
        let sample_count = animations[0].sample_times.len();

        // Assuming all ColladaAnims have the same duration..
        let duration = *animations[0].sample_times.last().unwrap();

        // Assuming constant sample rate
        let samples_per_second = sample_count as f32 / duration;

        let samples = (0 .. sample_count).map(|sample_index| {

            // Grab local poses for each joint from COLLADA animation if available,
            // falling back to identity matrix. The root joint additionally gets
            // the combined offset transform applied.
            let local_poses: Vec<Matrix4<f32>> = skeleton.joints.iter().map(|joint| {
                match joint_animations.get(&joint.name[..]) {
                    Some(a) if joint.is_root() => row_mat4_mul(transform, a.sample_poses[sample_index]),
                    Some(a) => a.sample_poses[sample_index],
                    None => mat4_id(),
                }
            }).collect();

            // Convert local poses to Transforms (for interpolation)
            let local_poses: Vec<T> = local_poses.iter().map(|pose_matrix| {
                T::from_matrix(*pose_matrix)
            }).collect();

            AnimationSample {
                local_poses: local_poses,
            }
        }).collect();

        AnimationClip {
            samples_per_second: samples_per_second,
            samples: samples,
        }
    }

}

/// An instance of an AnimationClip which tracks playback parameters,
/// mapping a controller's global clock onto the clip's local timeline.
pub struct ClipInstance<T: Transform> {
    /// Shared clip reference
    pub clip: Rc<AnimationClip<T>>,

    /// Controller clock time at animation start
    pub start_time: f32,

    /// Playback rate modifier, where 1.0 is original speed
    pub playback_rate: f32,

    /// Used to account for changes in playback rate: an additive local-time
    /// offset keeping local time continuous when the rate changes
    /// (see `set_playback_rate`).
    pub time_offset: f32,
}

impl<T: Transform> ClipInstance<T> {

    /// Construct an instance that plays `clip` from time zero at normal speed.
    pub fn new(clip: Rc<AnimationClip<T>>) -> ClipInstance<T> {
        ClipInstance {
            start_time: 0.0,
            time_offset: 0.0,
            playback_rate: 1.0,
            clip: clip,
        }
    }

    /// Adjust the playback rate of the clip without affecting the
    /// value of get_local_time for a given global time.
    pub fn set_playback_rate(&mut self, global_time: f32, new_rate: f32) {
        if self.playback_rate == new_rate {
            return;
        }
        // Re-anchor the offset so local time is continuous across the rate change.
        let local_time = self.get_local_time(global_time);
        self.time_offset = local_time - (global_time - self.start_time) * new_rate;
        self.playback_rate = new_rate;
    }

    /// Sample the underlying clip at this instance's local time,
    /// writing the blended joint poses into `blended_poses`.
    pub fn get_pose_at_time(&self, global_time: f32, blended_poses: &mut [T]) {
        let local_time = self.get_local_time(global_time);
        self.clip.get_pose_at_time(local_time, blended_poses);
    }

    /// Return the duration (in seconds) of the underlying clip.
    pub fn get_duration(&self) -> f32 {
        self.clip.get_duration()
    }

    /// Convert a controller-clock time into clip-local time, scaling elapsed
    /// time by the playback rate and adding the accumulated rate-change offset.
    fn get_local_time(&self, global_time: f32) -> f32 {
        let scaled_elapsed = (global_time - self.start_time) * self.playback_rate;
        scaled_elapsed + self.time_offset
    }
}