From 2cfcf0d5cc3a002864f60edeb8c43eca5c1b11d5 Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Fri, 23 Feb 2024 12:11:32 -0500 Subject: [PATCH 1/3] [TEST] fix URL --- fury/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fury/tests/test_io.py b/fury/tests/test_io.py index dff705055..45dd8c018 100644 --- a/fury/tests/test_io.py +++ b/fury/tests/test_io.py @@ -242,7 +242,7 @@ def test_load_cubemap_texture(): def test_load_sprite_sheet(): sprite_URL = ( 'https://raw.githubusercontent.com/' - 'antrikshmisri/DATA/master/fury/0yKFTBQ.png' + 'fury-gl/fury-data/master/unittests/fury_sprite.png' ) with InTemporaryDirectory() as tdir: From 95cf73458cf54a5c319f30a41129c790ba53698e Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Fri, 23 Feb 2024 12:15:35 -0500 Subject: [PATCH 2/3] [DOC] fix typo --- docs/examples/viz_skinning.py | 2 +- docs/experimental/viz_canvas.py | 2 +- .../2021/2021-06-28-gsoc-devmessias-4.rst | 2 +- .../2021/2021-07-05-gsoc-devmessias-5.rst | 2 +- .../2021/2021-08-23-final-work-antriksh.rst | 2 +- .../2023/2023-01-24-final-report-praneeth.rst | 2 +- .../2023-08-21-joaodellagli-final-report.rst | 172 +++++++++--------- fury/transform.py | 2 +- fury/ui/tests/test_helpers.py | 6 +- 9 files changed, 96 insertions(+), 96 deletions(-) diff --git a/docs/examples/viz_skinning.py b/docs/examples/viz_skinning.py index c7fc035f0..5a83ab54f 100644 --- a/docs/examples/viz_skinning.py +++ b/docs/examples/viz_skinning.py @@ -32,7 +32,7 @@ # After we get the timeline object, We want to initialise the skinning process. # You can set `bones=true` to visualize each bone transformation. Additionally, -# you can set `lenght` of bones in the `initialise_skin` method. +# you can set `length` of bones in the `initialise_skin` method. # Note: Make sure to call this method before you initialize ShowManager, else # bones won't be added to the scene. diff --git a/docs/experimental/viz_canvas.py b/docs/experimental/viz_canvas.py index adaf34164..b3da4f5b1 100644 --- a/docs/experimental/viz_canvas.py +++ b/docs/experimental/viz_canvas.py @@ -194,7 +194,7 @@ def test_sh(): vec3 lin = 2.5*occ*vec3(1.0,1.00,1.00)*(0.6+0.4*nor.y); lin += 1.0*sss*vec3(1.0,0.95,0.70)*occ; - // surface-light interacion + // surface-light interaction col = mate.xyz * lin; } diff --git a/docs/source/posts/2021/2021-06-28-gsoc-devmessias-4.rst b/docs/source/posts/2021/2021-06-28-gsoc-devmessias-4.rst index cfceb132f..94fcaa35f 100644 --- a/docs/source/posts/2021/2021-06-28-gsoc-devmessias-4.rst +++ b/docs/source/posts/2021/2021-06-28-gsoc-devmessias-4.rst @@ -156,7 +156,7 @@ must have a way to lock the write/read if the memory resource is busy. Meanwhile the `multiprocessing.Arrays `__ already has a context which allows lock (.get_lock()) SharedMemory -dosen’t[2]. The use of abstract class allowed me to deal with those +doesn’t[2]. The use of abstract class allowed me to deal with those peculiarities. `commit 358402e `__ diff --git a/docs/source/posts/2021/2021-07-05-gsoc-devmessias-5.rst b/docs/source/posts/2021/2021-07-05-gsoc-devmessias-5.rst index 416ac6c1f..d1c4b81b6 100644 --- a/docs/source/posts/2021/2021-07-05-gsoc-devmessias-5.rst +++ b/docs/source/posts/2021/2021-07-05-gsoc-devmessias-5.rst @@ -35,7 +35,7 @@ tasks related with this PR: `#424`_ now is possible to control all the visual characteristics at runtime. 
- 2D Layout: Meanwhile 3d network representations are very usefully - for exploring a dataset is hard to convice a group of network + for exploring a dataset is hard to convince a group of network scientists to use a visualization system which doesn't allow 2d representations. Because of that I started to coding the 2d behavior in the network visualization system. diff --git a/docs/source/posts/2021/2021-08-23-final-work-antriksh.rst b/docs/source/posts/2021/2021-08-23-final-work-antriksh.rst index 4dd4702a2..8bf4f39bf 100644 --- a/docs/source/posts/2021/2021-08-23-final-work-antriksh.rst +++ b/docs/source/posts/2021/2021-08-23-final-work-antriksh.rst @@ -83,7 +83,7 @@ Objectives Completed - **Add Accordion2D UI element to the UI sub-module** - Added Accordion2D to the UI sub-module. This Ui element allows users to visulize data in a tree with depth of one. Each node has a title and a content panel. The children for each node can be N if and only if the children are not nodes themselves. The child UIs can be placed inside the content panel by passing some coordinates, which can be absolute or normalized w.r.t the node content panel size. Tests and two demos were added for this UI element. Below is a screenshot for reference + Added Accordion2D to the UI sub-module. This Ui element allows users to visualize data in a tree with depth of one. Each node has a title and a content panel. The children for each node can be N if and only if the children are not nodes themselves. The child UIs can be placed inside the content panel by passing some coordinates, which can be absolute or normalized w.r.t the node content panel size. Tests and two demos were added for this UI element. Below is a screenshot for reference .. image:: https://camo.githubusercontent.com/9395d0ea572d7f253a051823f02496450c9f79d19ff0baf32841ec648b6f2860/68747470733a2f2f692e696d6775722e636f6d2f7854754f645a742e706e67 :width: 200 diff --git a/docs/source/posts/2023/2023-01-24-final-report-praneeth.rst b/docs/source/posts/2023/2023-01-24-final-report-praneeth.rst index c6428ecfe..fdd6230bd 100644 --- a/docs/source/posts/2023/2023-01-24-final-report-praneeth.rst +++ b/docs/source/posts/2023/2023-01-24-final-report-praneeth.rst @@ -133,7 +133,7 @@ Other Objectives - **Grouping Shapes** - Many times we need to perform some actions on a group of shapes so here we are with the grouping feature using which you can group shapes together, reposition them, rotate them and delete them together. To activate grouping of shapes you have to be on selection mode then by holding **Ctrl** key select the required shapes and they will get highlighted. To remove shape from the group just hold the **Ctrl** and click the shape again it will get deselected. Then once eveything is grouped you can use the normal transformation as normal i.e. for translation just drag the shapes around and for rotation the rotation slider appears at usual lower left corner which can be used. + Many times we need to perform some actions on a group of shapes so here we are with the grouping feature using which you can group shapes together, reposition them, rotate them and delete them together. To activate grouping of shapes you have to be on selection mode then by holding **Ctrl** key select the required shapes and they will get highlighted. To remove shape from the group just hold the **Ctrl** and click the shape again it will get deselected. Then once everything is grouped you can use the normal transformation as normal i.e. 
for translation just drag the shapes around and for rotation the rotation slider appears at usual lower left corner which can be used. *Pull Requests:* diff --git a/docs/source/posts/2023/2023-08-21-joaodellagli-final-report.rst b/docs/source/posts/2023/2023-08-21-joaodellagli-final-report.rst index f56072574..33650149a 100644 --- a/docs/source/posts/2023/2023-08-21-joaodellagli-final-report.rst +++ b/docs/source/posts/2023/2023-08-21-joaodellagli-final-report.rst @@ -28,10 +28,10 @@ Google Summer of Code Final Work Product Abstract -------- -This project had the goal to implement 3D Kernel Density Estimation rendering to FURY. Kernel Density Estimation, or KDE, is a -statistical method that uses kernel smoothing for modeling and estimating the density distribution of a set of points defined -inside a given region. For its graphical implementation, it was used post-processing techniques such as offscreen rendering to -framebuffers and colormap post-processing as tools to achieve the desired results. This was completed with a functional basic KDE +This project had the goal to implement 3D Kernel Density Estimation rendering to FURY. Kernel Density Estimation, or KDE, is a +statistical method that uses kernel smoothing for modeling and estimating the density distribution of a set of points defined +inside a given region. For its graphical implementation, it was used post-processing techniques such as offscreen rendering to +framebuffers and colormap post-processing as tools to achieve the desired results. This was completed with a functional basic KDE rendering result, that relies on a solid and easy-to-use API, as well as some additional features. Proposed Objectives @@ -55,54 +55,54 @@ Objectives Completed - **Implement framebuffer usage in FURY** The first phase, addressed from *May/29* to *July/07*, started with the investigation of - `VTK's Framebuffer Object `_, a vital part of this project, to understand - how to use it properly. + `VTK's Framebuffer Object `_, a vital part of this project, to understand + how to use it properly. Framebuffer Objects, abbreviated as FBOs, are the key to post-processing effects in OpenGL, as they are used to render things offscreen and save the resulting image to a texture - that will be later used to apply the desired post-processing effects within the object's `fragment shader `_ - rendered to screen, in this case, a `billboard `_. In the case of the - `Kernel Density Estimation `_ post-processing effect, we need a special kind of FBO, one that stores textures' - values as floats, different from the standard 8-bit unsigned int storage. This is necessary because the KDE rendering involves rendering every KDE point calculation - to separate billboards, rendered to the same scene, which will have their intensities, divided by the number of points rendered, blended with - `OpenGL Additive Blending `_, and if a relative big number of points are rendered at the + that will be later used to apply the desired post-processing effects within the object's `fragment shader `_ + rendered to screen, in this case, a `billboard `_. In the case of the + `Kernel Density Estimation `_ post-processing effect, we need a special kind of FBO, one that stores textures' + values as floats, different from the standard 8-bit unsigned int storage. 
This is necessary because the KDE rendering involves rendering every KDE point calculation + to separate billboards, rendered to the same scene, which will have their intensities, divided by the number of points rendered, blended with + `OpenGL Additive Blending `_, and if a relative big number of points are rendered at the same time, 32-bit float precision is needed to guarantee that small-intensity values will not be capped to zero, and disappear. - After a month going through VTK's FBO documentation and weeks spent trying different approaches to this method, it would not work - properly, as some details seemed to be missing from the documentation, and asking the community haven't solved the problem as well. - Reporting that to my mentors, which unsuccessfully tried themselves to make it work, they decided it was better if another path was taken, using - `VTK's WindowToImageFilter `_ method as a workaround, described - in this `blogpost `_. This method helped the development of - three new functions to FURY, *window_to_texture()*, *texture_to_actor()* and *colormap_to_texture()*, that allow the passing of - different kinds of textures to FURY's actor's shaders, the first one to capture a window and pass it as a texture to an actor, - the second one to pass an external texture to an actor, and the third one to specifically pass a colormap as a texture to an - actor. It is important to say that *WindowToImageFilter()* is not the ideal way to make it work, as this method does not seem to + After a month going through VTK's FBO documentation and weeks spent trying different approaches to this method, it would not work + properly, as some details seemed to be missing from the documentation, and asking the community haven't solved the problem as well. + Reporting that to my mentors, which unsuccessfully tried themselves to make it work, they decided it was better if another path was taken, using + `VTK's WindowToImageFilter `_ method as a workaround, described + in this `blogpost `_. This method helped the development of + three new functions to FURY, *window_to_texture()*, *texture_to_actor()* and *colormap_to_texture()*, that allow the passing of + different kinds of textures to FURY's actor's shaders, the first one to capture a window and pass it as a texture to an actor, + the second one to pass an external texture to an actor, and the third one to specifically pass a colormap as a texture to an + actor. It is important to say that *WindowToImageFilter()* is not the ideal way to make it work, as this method does not seem to support float textures. However, a workaround to that is currently being worked on, as I will describe later on. *Pull Requests:* - **KDE Rendering Experimental Program (Needs major revision):** `https://github.com/fury-gl/fury/pull/804 `_ - - The result of this whole FBO and WindowToImageFilter experimentation is well documented in PR - `#804 `_ that implements an experimental version of a KDE rendering program. - The future of this PR, as discussed with my mentors, is to be better documented to be used as an example for developers on + + The result of this whole FBO and WindowToImageFilter experimentation is well documented in PR + `#804 `_ that implements an experimental version of a KDE rendering program. + The future of this PR, as discussed with my mentors, is to be better documented to be used as an example for developers on how to develop features in FURY with the tools used, and it shall be done soon. 
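  As a rough illustration of the workaround described above, the sketch below shows the kind of raw VTK calls it builds on: a *vtkWindowToImageFilter* capture handed to an actor as a texture. The helper name is made up for the example, and the snippet is not FURY's actual *window_to_texture()* implementation, just a minimal sketch of the underlying idea.

  .. code-block:: python

     import vtk

     def capture_window_as_texture(render_window, target_actor):
         """Hypothetical helper: grab the current window and attach it as a texture."""
         w2if = vtk.vtkWindowToImageFilter()
         w2if.SetInput(render_window)
         w2if.Update()  # copies the window's framebuffer into a vtkImageData

         texture = vtk.vtkTexture()
         texture.SetInputConnection(w2if.GetOutputPort())
         target_actor.SetTexture(texture)  # shaders on the actor can now sample it
         return texture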
- **Shader-framebuffer integration** - The second phase, which initially was thought of as "Implement a shader that uses a colormap to render framebuffers" and "Escalate this - rendering for composing multiple framebuffers" was actually a pretty simple phase that could be addressed in one week, *July/10* - to *July/17*, done at the same time as the third phase goal, documented in this - `blogpost `_. As FURY already had a tool for generating and - using colormaps, they were simply connected to the shader part of the program as textures, with the functions explained above. + The second phase, which initially was thought of as "Implement a shader that uses a colormap to render framebuffers" and "Escalate this + rendering for composing multiple framebuffers" was actually a pretty simple phase that could be addressed in one week, *July/10* + to *July/17*, done at the same time as the third phase goal, documented in this + `blogpost `_. As FURY already had a tool for generating and + using colormaps, they were simply connected to the shader part of the program as textures, with the functions explained above. Below, is the result of the *matplotlib viridis* colormap passed to a simple gaussian KDE render: .. image:: https://raw.githubusercontent.com/JoaoDell/gsoc_assets/main/images/final_2d_plot.png :align: center :alt: Final 2D plot - That is also included in PR `#804 `_. Having the 2D plot ready, some time was taken to - figure out how to enable a 3D render, that includes rotation and other movement around the set rendered, which was solved by - learning about the callback properties that exist inside *VTK*. Callbacks are ways to enable code execution inside the VTK rendering - loop, enclosed inside *vtkRenderWindowInteractor.start()*. If it is desired to add a piece of code that, for example, passes a time + That is also included in PR `#804 `_. Having the 2D plot ready, some time was taken to + figure out how to enable a 3D render, that includes rotation and other movement around the set rendered, which was solved by + learning about the callback properties that exist inside *VTK*. Callbacks are ways to enable code execution inside the VTK rendering + loop, enclosed inside *vtkRenderWindowInteractor.start()*. If it is desired to add a piece of code that, for example, passes a time variable to the fragment shader over time, a callback function can be declared: .. code-block:: python @@ -117,8 +117,8 @@ Objectives Completed showm.add_iren_callback(callback_function, "RenderEvent") - The piece of code above created a function that updates the time variable *t* in every *"RenderEvent"*, and passes it to the - fragment shader. With that property, the camera and some other parameters could be updated, which enabled 3D visualization, that + The piece of code above created a function that updates the time variable *t* in every *"RenderEvent"*, and passes it to the + fragment shader. With that property, the camera and some other parameters could be updated, which enabled 3D visualization, that then, outputted the following result, using *matplotlib inferno* colormap: .. 
image:: https://raw.githubusercontent.com/JoaoDell/gsoc_assets/main/images/3d_kde_gif.gif @@ -126,37 +126,37 @@ Objectives Completed :alt: 3D Render gif - **KDE Calculations** (ongoing) - As said before, the second and third phases were done simultaneously, so after having a way to capture the window and use it as a - texture ready, the colormap ready, and an initial KDE render ready, all it was needed to do was to improve the KDE calculations. - As this `Wikipedia page `_ explains, a KDE calculation is to estimate an - abstract density around a set of points defined inside a given region with a kernel, that is a function that models the density + As said before, the second and third phases were done simultaneously, so after having a way to capture the window and use it as a + texture ready, the colormap ready, and an initial KDE render ready, all it was needed to do was to improve the KDE calculations. + As this `Wikipedia page `_ explains, a KDE calculation is to estimate an + abstract density around a set of points defined inside a given region with a kernel, that is a function that models the density around a point based on its associated distribution :math:`\sigma`. - A well-known kernel is, for example, the **Gaussian Kernel**, that says that the density around a point :math:`p` with distribution + A well-known kernel is, for example, the **Gaussian Kernel**, that says that the density around a point :math:`p` with distribution :math:`\sigma` is defined as: .. math:: GK_{\textbf{p}, \sigma} (\textbf{x}) = e^{-\frac{1}{2}\frac{||\textbf{x} - \textbf{p}||^2}{\sigma^2}} - Using that kernel, we can calculate the KDE of a set of points :math:`P` with associated distributions :math:`S` calculating their individual + Using that kernel, we can calculate the KDE of a set of points :math:`P` with associated distributions :math:`S` calculating their individual Gaussian distributions, summing them up and dividing them by the total number of points :math:`n`: .. math:: KDE(A, S)=\frac{1}{n}\sum_{i = 0}^{n}GK(x, p_{i}, \sigma_{i}) - So I dove into implementing all of that into the offscreen rendering part, and that is when the lack of a float framebuffer would - charge its cost. As it can be seen above, just calculating each point's density isn't the whole part, as I also need to divide - everyone by the total number of points :math:`n`, and then sum them all. The problem is that, if the number of points its big enough, - the individual densities will be really low, and that would not be a problem for a 32-bit precision float framebuffer, but that is - *definitely* a problem for a 8-bit integer framebuffer, as small enough values will simply underflow and disappear. That issue is - currently under investigation, and some solutions have already being presented, as I will show in the **Objectives in Progress** + So I dove into implementing all of that into the offscreen rendering part, and that is when the lack of a float framebuffer would + charge its cost. As it can be seen above, just calculating each point's density isn't the whole part, as I also need to divide + everyone by the total number of points :math:`n`, and then sum them all. The problem is that, if the number of points its big enough, + the individual densities will be really low, and that would not be a problem for a 32-bit precision float framebuffer, but that is + *definitely* a problem for a 8-bit integer framebuffer, as small enough values will simply underflow and disappear. 
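  To make the formulas and the underflow problem concrete, here is a minimal CPU-side NumPy sketch (not the project's shader code) that evaluates the Gaussian KDE at one position and shows how a single point's :math:`1/n` contribution is already smaller than the smallest nonzero step of an 8-bit channel:

  .. code-block:: python

     import numpy as np

     rng = np.random.default_rng(0)
     points = rng.uniform(-1.0, 1.0, size=(1000, 2))  # n points in 2D
     sigmas = np.full(len(points), 0.1)               # one bandwidth per point

     def gaussian_kde(x, points, sigmas):
         # (1/n) * sum_i exp(-0.5 * ||x - p_i||^2 / sigma_i^2)
         sq_dist = np.sum((points - x) ** 2, axis=1)
         return np.mean(np.exp(-0.5 * sq_dist / sigmas ** 2))

     density = gaussian_kde(np.array([0.25, -0.4]), points, sigmas)

     # Each point contributes at most 1/n to the final sum; with n = 1000 that is
     # 0.001, which rounds to 0 in a uint8 channel (smallest nonzero step: 1/255).
     per_point_max = 1.0 / len(points)
     print(density, round(per_point_max * 255))  # the 8-bit contribution collapses to 0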
That issue is + currently under investigation, and some solutions have already being presented, as I will show in the **Objectives in Progress** section. - Apart from that, after having the experimental program ready, I focused on modularizing it into a functional and simple API - (without the :math:`n` division for now), and I could get a good set of results from that. The API I first developed implemented the - *EffectManager* class, responsible for managing all of the behind-the-scenes steps necessary for the kde render to work, + Apart from that, after having the experimental program ready, I focused on modularizing it into a functional and simple API + (without the :math:`n` division for now), and I could get a good set of results from that. The API I first developed implemented the + *EffectManager* class, responsible for managing all of the behind-the-scenes steps necessary for the kde render to work, encapsulated inside the *ÈffectManager.kde()* method. It had the following look: .. code-block:: python @@ -180,8 +180,8 @@ Objectives Completed :align: center :alt: API 3D KDE plot - And this was not the only feature I had implemented for this API, as the use of *WindowToImageFilter* method opened doors for a - whole new world for FURY: The world of post-processing effects. With this features setup, I managed to implement a *gaussian blur* + And this was not the only feature I had implemented for this API, as the use of *WindowToImageFilter* method opened doors for a + whole new world for FURY: The world of post-processing effects. With this features setup, I managed to implement a *gaussian blur* effect, a *grayscale* effect and a *Laplacian* effect for calculating "borders": .. image:: https://raw.githubusercontent.com/JoaoDell/gsoc_assets/main/images/gaussian_blur.png @@ -195,15 +195,15 @@ Objectives Completed .. image:: https://raw.githubusercontent.com/JoaoDell/gsoc_assets/main/images/laplacian1.gif :align: center :alt: Laplacian effect - - As this wasn't the initial goal of the project and I still had several issues to deal with, I have decided to leave these features as a + + As this wasn't the initial goal of the project and I still had several issues to deal with, I have decided to leave these features as a future addition. - Talking with my mentors, we realized that the first KDE API, even though simple, could lead to bad usage from users, as the - *em.kde()* method, that outputted a *FURY actor*, had dependencies different from any other object of its kind, making it a new - class of actors, which could lead to confusion and bad handling. After some pair programming sessions, they instructed me to take - a similar, but different road from what I was doing, turning the kde actor into a new class, the *KDE* class. This class would - have almost the same set of instructions present in the prior method, but it would break them in a way it would only be completely + Talking with my mentors, we realized that the first KDE API, even though simple, could lead to bad usage from users, as the + *em.kde()* method, that outputted a *FURY actor*, had dependencies different from any other object of its kind, making it a new + class of actors, which could lead to confusion and bad handling. After some pair programming sessions, they instructed me to take + a similar, but different road from what I was doing, turning the kde actor into a new class, the *KDE* class. 
This class would + have almost the same set of instructions present in the prior method, but it would break them in a way it would only be completely set up after being passed to the *EffectManager* via its add function. Below, how the refactoring handles it: .. code-block:: python @@ -221,13 +221,13 @@ Objectives Completed showm.start() - Which outputted the same results as shown above. It may have cost some simplicity as we are now one line farther from having it + Which outputted the same results as shown above. It may have cost some simplicity as we are now one line farther from having it working, but it is more explicit in telling the user this is not just a normal actor. - Another detail I worked on was the kernel variety. The Gaussian Kernel isn't the only one available to model density distributions, - there are several others that can do that job, as it can be seen in this `scikit-learn piece of documentation `_ - and this `Wikipedia page on kernels `_. Based on the scikit-learn KDE - implementation, I worked on implementing the following kernels inside our API, that can be chosen as a parameter when calling the + Another detail I worked on was the kernel variety. The Gaussian Kernel isn't the only one available to model density distributions, + there are several others that can do that job, as it can be seen in this `scikit-learn piece of documentation `_ + and this `Wikipedia page on kernels `_. Based on the scikit-learn KDE + implementation, I worked on implementing the following kernels inside our API, that can be chosen as a parameter when calling the *KDE* class: * Cosine @@ -247,12 +247,12 @@ Objectives Completed *Pull Requests*: - **First Stage of the KDE Rendering API (will be merged soon)**: `https://github.com/fury-gl/fury/pull/826 `_ - - All of this work culminated in PR `#826 `_, that proposes to add the first stage of - this API (there are some details yet to be completed, like the :math:`n` division) to FURY. This PR added the described API, and also - proposed some minor changes to some already existing FURY functions related to callbacks, changes necessary for this and other - future applications that would use it to work. It also added the six kernels described, and a simple documented example on how - to use this feature. + + All of this work culminated in PR `#826 `_, that proposes to add the first stage of + this API (there are some details yet to be completed, like the :math:`n` division) to FURY. This PR added the described API, and also + proposed some minor changes to some already existing FURY functions related to callbacks, changes necessary for this and other + future applications that would use it to work. It also added the six kernels described, and a simple documented example on how + to use this feature. Other Objectives ---------------- @@ -268,14 +268,14 @@ Objectives in Progress ---------------------- - **KDE Calculations** (ongoing) - The KDE rendering, even though almost complete, have the $n$ division, an important step, missing, as this normalization allows colormaps - to cover the whole range o values rendered. The lack of a float FBO made a big difference in the project, as the search for a functional implementation of it not only delayed the project, but it is vital for + The KDE rendering, even though almost complete, have the $n$ division, an important step, missing, as this normalization allows colormaps + to cover the whole range o values rendered. 
The lack of a float FBO made a big difference in the project, as the search for a functional implementation of it not only delayed the project, but it is vital for the correct calculations to work. - For the last part, a workaround thought was to try an approach I later figured out is an old one, as it can be check in - `GPU Gems 12.3.3 section `_: - If I need 32-bit float precision and I got 4 8-bit integer precision available, why not trying to pack this float into this RGBA - texture? I have first tried to do one myself, but it didn't work for some reason, so I tried `Aras Pranckevičius `_ + For the last part, a workaround thought was to try an approach I later figured out is an old one, as it can be check in + `GPU Gems 12.3.3 section `_: + If I need 32-bit float precision and I got 4 8-bit integer precision available, why not trying to pack this float into this RGBA + texture? I have first tried to do one myself, but it didn't work for some reason, so I tried `Aras Pranckevičius `_ implementation, that does the following: .. code-block:: GLSL @@ -300,15 +300,15 @@ Objectives in Progress :align: center :alt: Blurred result - But it is not an ideal solution as well, as it may lead to distortions in the actual density values, depending on the application of - the KDE. Now, my goal is to first find the root of the noise problem, and then, if that does not work, try to make the gaussian filter + But it is not an ideal solution as well, as it may lead to distortions in the actual density values, depending on the application of + the KDE. Now, my goal is to first find the root of the noise problem, and then, if that does not work, try to make the gaussian filter work. - Another detail that would be a good addition to the API is UI controls. Filipi, one of my mentors, told me it would be a good feature - if the user could control the intensities of the bandwidths for a better structural visualization of the render, and knowing FURY already - have a good set of `UI elements `_, I just needed to integrate - that into my program via callbacks. I tried implementing an intensity slider. However, for some reason, it is making the program crash - randomly, for reasons I still don't know, so that is another issue under investigation. Below, we show a first version of that feature, + Another detail that would be a good addition to the API is UI controls. Filipi, one of my mentors, told me it would be a good feature + if the user could control the intensities of the bandwidths for a better structural visualization of the render, and knowing FURY already + have a good set of `UI elements `_, I just needed to integrate + that into my program via callbacks. I tried implementing an intensity slider. However, for some reason, it is making the program crash + randomly, for reasons I still don't know, so that is another issue under investigation. Below, we show a first version of that feature, which was working before the crashes: .. 
image:: https://raw.githubusercontent.com/JoaoDell/gsoc_assets/main/images/slider.gif @@ -346,7 +346,7 @@ Timeline +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Week 6 (10-07-2022) | Things are Starting to Build Up | `FURY `__ - `Python `__ | +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Week 7 (17-07-2022) | Experimentation Done | `FURY `__ - `Python `__ | +| Week 7 (17-07-2022) | Experimentation Done | `FURY `__ - `Python `__ | +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Week 8 (24-07-2022) | The Birth of a Versatile API | `FURY `__ - `Python `__ | +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -356,5 +356,5 @@ Timeline +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Week 11 (14-08-2022)| A Refactor is Sometimes Needed | `FURY `__ - `Python `__ | +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Week 12 (21-08-2022)| Now That is (almost) a Wrap! | `FURY `__ - `Python `__ | +| Week 12 (21-08-2022)| Now That is (almost) a Wrap! | `FURY `__ - `Python `__ | +---------------------+----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/fury/transform.py b/fury/transform.py index ff5db9479..24faa9994 100644 --- a/fury/transform.py +++ b/fury/transform.py @@ -149,7 +149,7 @@ def sphere2cart(r, theta, phi): Returns --------- x : array - x coordinate(s) in Cartesion space + x coordinate(s) in Cartesian space y : array y coordinate(s) in Cartesian space z : array diff --git a/fury/ui/tests/test_helpers.py b/fury/ui/tests/test_helpers.py index 3584ee48e..fbf3bde13 100644 --- a/fury/ui/tests/test_helpers.py +++ b/fury/ui/tests/test_helpers.py @@ -22,7 +22,7 @@ def test_clip_overflow(): clip_overflow(text, text.size[0]) npt.assert_equal('Hello', text.message) - text.message = 'Hello wassup' + text.message = "Hello what's up?" 
clip_overflow(text, text.size[0]) npt.assert_equal('He...', text.message) @@ -63,9 +63,9 @@ def test_wrap_overflow(): wrap_overflow(text, text.size[0]) npt.assert_equal('Hello', text.message) - text.message = 'Hello wassup' + text.message = "Hello what's up?" wrap_overflow(text, text.size[0]) - npt.assert_equal('Hello\n wass\nup', text.message) + npt.assert_equal("Hello\n what\n's up\n?", text.message) text.message = 'A very very long message to clip text overflow' wrap_overflow(text, text.size[0]) From 26cea123f4fc990aa18c22df9c593640f7cf8d6b Mon Sep 17 00:00:00 2001 From: Serge Koudoro Date: Fri, 23 Feb 2024 13:29:08 -0500 Subject: [PATCH 3/3] add dependabot --- .github/dependabot.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..a57e0b729 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +# Set update schedule for GitHub Actions + +version: 2 +updates: + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + # Check for updates to GitHub Actions every week + interval: "weekly" + groups: + actions: + patterns: + - "*"