
  • orx-imp-vec

    orx-imp-vec

    ImpVec stands for immutable push vector 👿: a data structure that allows appending elements through a shared reference.

    Specifically, it extends vector capabilities with the following two methods:

    • imp_push
    • imp_extend_from_slice

    Note that both of these methods can be called with &self rather than &mut self.

    Motivation

    Appending to a vector with a shared reference sounds unconventional, and it is.

    From another perspective, however, appending an element to the end of the vector does not mutate any of the already added elements or change their positions. It can be argued that this is no different from creating a new element within the scope. This statement will become clear with the following example.

    The challenge is to define a type-safe, recursive and expressive expression builder. In our toy example, an expression is either a symbol, or the addition or subtraction of two expressions. The final desired ergonomic solution is as follows:

    let scope = Scope::default();
    
    // instantiate some symbols
    let x = scope.symbol("x");
    let y = scope.symbol("y");
    assert_eq!(&x.to_string(), "x");
    assert_eq!(&y.to_string(), "y");
    
    // apply binary operations to create new symbols
    let p = x + y;
    assert_eq!(&p.to_string(), "x + y");
    
    let q = x - y;
    assert_eq!(&q.to_string(), "x - y");
    
    // and further binary operations
    let t = p + q;
    assert_eq!(&t.to_string(), "x + y + x - y");
    
    // we only use 'scope' to create symbols
    // but in the background, all expressions are collected in our scope
    let all_expressions: Vec<_> = scope.expressions.iter().map(|x| x.to_string()).collect();
    assert_eq!(
        all_expressions,
        ["x", "y", "x + y", "x - y", "x + y + x - y"]
    );

    At first, this seemed impossible in safe Rust for too many reasons. However, it is conveniently possible using an ImpVec. You may run the example in expressions.rs with cargo run --example expressions, or see the details of the implementation below.

    Complete Implementation
    use orx_imp_vec::*;
    use std::{
        fmt::Display,
        ops::{Add, Sub},
    };
    
    /// A scope for expressions.
    #[derive(Default)]
    struct Scope<'a> {
        expressions: ImpVec<Expr<'a>>,
    }
    
    impl<'a> Scope<'a> {
        /// Bottom of the expressions recursion, the symbol primitive
        fn symbol(&'a self, name: &'static str) -> ExprInScope<'a> {
            let expr = Expr::Symbol(name);
            self.expressions.imp_push(expr);
            ExprInScope {
                scope: self,
                expr: &self.expressions[self.expressions.len() - 1],
            }
        }
    }
    
    /// A recursive expression with three demo variants
    enum Expr<'a> {
        Symbol(&'static str),
        Addition(&'a Expr<'a>, &'a Expr<'a>),
        Subtraction(&'a Expr<'a>, &'a Expr<'a>),
    }
    
    impl<'a> Display for Expr<'a> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            match self {
                Expr::Symbol(x) => write!(f, "{}", x),
                Expr::Addition(x, y) => write!(f, "{} + {}", x, y),
                Expr::Subtraction(x, y) => write!(f, "{} - {}", x, y),
            }
        }
    }
    
    /// Expression in a scope:
    /// * it knows what it is
    /// * it knows which scope it belongs to
    ///
    /// It can implement Copy which turns out to be extremely important!
    #[derive(Clone, Copy)]
    struct ExprInScope<'a> {
        scope: &'a Scope<'a>,
        expr: &'a Expr<'a>,
    }
    
    impl<'a> ExprInScope<'a> {
        /// Recall, it knows the scope it belongs to,
        /// and can check it in O(1)
        fn belongs_to_same_scope(&self, other: Self) -> bool {
            let self_scope = self.scope as *const Scope;
            let other_scope = other.scope as *const Scope;
            self_scope == other_scope
        }
    }
    impl<'a> Display for ExprInScope<'a> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{}", self.expr)
        }
    }
    
    impl<'a> Add for ExprInScope<'a> {
        type Output = ExprInScope<'a>;
    
        /// We can create an expression by adding two expressions
        ///
        /// Where do we store the new expression?
        ///
        /// Of course, in the scope that both expressions belong to.
        /// And we can do so by `imp_push`.
        ///
        /// # Panics
        ///
        /// Panics if the lhs & rhs do not belong to the same scope.
        fn add(self, rhs: Self) -> Self::Output {
            assert!(self.belongs_to_same_scope(rhs));
            let expressions = &self.scope.expressions;
            let expr = Expr::Addition(self.expr, rhs.expr);
            expressions.imp_push(expr);
            ExprInScope {
                scope: self.scope,
                expr: &expressions[expressions.len() - 1],
            }
        }
    }
    
    impl<'a> Sub for ExprInScope<'a> {
        type Output = ExprInScope<'a>;
    
        /// Similarly, we can create an expression by subtracting two expressions
        /// 
        /// # Panics
        ///
        /// Panics if the lhs & rhs do not belong to the same scope.
        fn sub(self, rhs: Self) -> Self::Output {
            assert!(self.belongs_to_same_scope(rhs));
            let expressions = &self.scope.expressions;
            let expr = Expr::Subtraction(self.expr, rhs.expr);
            expressions.imp_push(expr);
            ExprInScope {
                scope: self.scope,
                expr: &expressions[expressions.len() - 1],
            }
        }
    }
    
    let scope = Scope::default();
    
    // instantiate some symbols
    let x = scope.symbol("x");
    let y = scope.symbol("y");
    assert_eq!(&x.to_string(), "x");
    assert_eq!(&y.to_string(), "y");
    
    // apply binary operations to create new symbols
    let p = x + y;
    assert_eq!(&p.to_string(), "x + y");
    
    let q = x - y;
    assert_eq!(&q.to_string(), "x - y");
    
    // and further binary operations
    let t = p + q;
    assert_eq!(&t.to_string(), "x + y + x - y");
    
    // we only use 'scope' to create symbols
    // but in the background, all expressions are collected in our scope
    let all_expressions: Vec<_> = scope.expressions.iter().map(|x| x.to_string()).collect();
    assert_eq!(
        all_expressions,
        ["x", "y", "x + y", "x - y", "x + y + x - y"]
    );

    You may find another demonstration where an ImpVec mimics a scope in the system_of_linear_inequalities.rs example.

    Finally, you may find the initial motivation of this crate and the ImpVec type in the imp-vec-motivation article.

    Safety

    It is natural to expect that appending elements to a vector does not affect the already added elements. However, this is usually not the case due to underlying memory management. For instance, std::vec::Vec may move already added elements to different memory locations to maintain the contiguous layout of the vector.

    PinnedVec prevents such implicit changes in memory locations. It guarantees that push and extend methods keep memory locations of already added elements intact. Therefore, it is perfectly safe to hold on to references of the vector while appending elements.

    Consider the classical example that does not compile, which is often presented to highlight the safety guarantees of rust:

    let mut vec = vec![0, 1, 2, 3];
    
    let ref_to_first = &vec[0];
    assert_eq!(ref_to_first, &0);
    
    vec.push(4);
    
    // does not compile due to the following reason:  cannot borrow `vec` as mutable because it is also borrowed as immutable
    // assert_eq!(ref_to_first, &0);

    This beloved feature of Rust's borrow checker is not required for the imp_push and imp_extend_from_slice methods of ImpVec, since these methods do not take a &mut self reference. Therefore, the following code compiles and runs perfectly safely.

    use orx_imp_vec::*;
    
    let mut vec = ImpVec::new();
    vec.extend_from_slice(&[0, 1, 2, 3]);
    
    let ref_to_first = &vec[0];
    assert_eq!(ref_to_first, &0);
    
    vec.imp_push(4);
    assert_eq!(vec.len(), 5);
    
    vec.imp_extend_from_slice(&[6, 7]);
    assert_eq!(vec.len(), 7);
    
    assert_eq!(ref_to_first, &0);

    Parallelization

    ImpVec implements ConcurrentCollection provided that the wrapped PinnedVec is a concurrent collection (all known implementations satisfy this).

    Therefore, when the orx_parallel crate is included, ImpVec also automatically implements ParallelizableCollection.

    This means that computations over the vector can be efficiently parallelized:

    • imp_vec.par() returns a parallel iterator over references to its elements, and
    • imp_vec.into_par() consumes the vector and returns a parallel iterator of the owned elements.

    You may find demonstrations in the demo_parallelization and bench_parallelization examples.

    Contributing

    Contributions are welcome! If you notice an error, have a question or think something could be improved, please open an issue or create a PR.

    License

    Dual-licensed under Apache 2.0 or MIT.

    Visit original content creator repository https://github.com/orxfun/orx-imp-vec
  • Cloth-Sim



    Cloth-Simulator

    A spring-model cloth physics simulator made using C/C++


    Preview

    demo.mp4

    As of 30/12/2021, Cloth-Sim features a cloth mesh importer (in the form of a WaveFront obj file), a cloth mesh exporter, cloth-sphere and cloth-plane collision simulations with options to apply forces such as angular velocity, and a cloth free-fall simulation with options to apply forces such as wind direction.

    Future Releases

    Features that I would like to work towards implementing

    • Adding textures/lighting and face triangulation
    • Improving the spring-model
    • Increasing performance (less load with more springs)
    • More simulation scenarios

    Feel free to suggest some in the issues tab!

    How

    To create a cloth mesh, you need a WaveFront obj editor to produce the obj file for import. The simplest way is to create a plane in Blender and add an equal number of loop cuts on both the horizontal and the vertical. Export as quads.

    Cloth Mesh

    The program uses the mass-spring-damper model to model the cloth. In this case, each vertex in the mesh would be considered a mass (particle with a fixed weight). After loading in the cloth mesh, springs are created using adjacent and diagonal connections between the particles as shown in the following diagram.

    Spring Model

    Then, to simulate the cloth, you apply the spring force and gravity to all of the created springs (plus additional forces depending on the simulation, such as friction or wind).

    Force

    The parameters that worked for me were:

    mass = 0.5
    ks = 10
    kd = 5
    g = -0.981
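
    As a rough illustration of this force model, here is a small Python sketch (my own illustration, not the repository's C++ code) of the spring-damper force on a single spring, using the parameter values above; the positions, velocities and rest length are made-up numbers:

    import numpy as np

    mass, ks, kd, g = 0.5, 10.0, 5.0, -0.981   # parameter values quoted above

    def spring_force(p1, p2, v1, v2, rest_length):
        # Hooke spring plus damping, both acting along the spring axis
        delta = p2 - p1
        length = np.linalg.norm(delta)
        direction = delta / length
        f_spring = ks * (length - rest_length) * direction    # stretch term
        f_damp = kd * np.dot(v2 - v1, direction) * direction  # damping term
        return f_spring + f_damp

    # two particles one unit apart with rest length 0.8: the spring pulls p1 toward p2
    p1, p2 = np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0])
    v1, v2 = np.zeros(3), np.zeros(3)
    force_on_p1 = spring_force(p1, p2, v1, v2, rest_length=0.8)
    force_on_p1 = force_on_p1 + mass * np.array([0.0, g, 0.0])  # add gravity per particle
    print(force_on_p1)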
    

    Installation

    Linux

    Dependencies include OpenGL 4.50, libdl, glfw3, pthread, glm

    git clone https://github.com/ranaxdev/Masters/
    cd Animation_Simulation/Cloth-Sim
    mkdir Release
    cd Release
    cmake -DCMAKE_BUILD_TYPE=Release ../
    make

    Windows

    I’m currently working on releasing an executable so that you don’t have to use cmake to build it on Windows.

    Libraries Used

    • GLAD : OpenGL Loading Library

    • GLM : OpenGL Mathematics

    • GLFW : A multi-platform library for OpenGL, OpenGL ES, Vulkan, window and input

    • Dear ImGui : Bloat-free Graphical User interface for C++ with minimal dependencies

    License

    Distributed under the Apache 2.0 License. See LICENSE for more information.

    Contact

    S.R. Qaiser – @pitu_dev – sc21srq@leeds.ac.uk

    Project Link: https://github.com/ranaxdev/Cloth-Sim

    Visit original content creator repository https://github.com/ranaxdev/Cloth-Sim
  • GRE-Prep-Tool

    GRE Preparation Tool

    A powerful tool to prepare for GRE using Command Line Terminal
    The word lists are provided by Vocabulary.com.


    Demo

    🛠 Installation & Set Up

    1. Clone this repository

      git clone https://github.com/pycoder2000/GRE-Prep-Tool.git
    2. Change directories

      cd GRE-Prep-Tool
    3. Install dependencies

      pip install -r requirements.txt
    4. ⚠️ Change this date: set the date below to the day you start, to enable the Stats feature.

      # The day you start using this program in dd/mm/yyyy format
      StartDate = "dd/mm/yyyy"
    5. Run Project

      python main.py
    6. Add alias to terminal (optional)

      # Add the line below to your .bashrc or .zshrc (the default shell on MacOS) file
      alias gre='python <Location to Folder>/GRE-Prep-Tool/main.py'

    📚 Vocabulary Lists

    1. Manhattan GRE Complete
    2. GRE Complete Vocabulary List
    3. Barrons 333
    4. 900+ Essential GRE Words
    5. Word Power Made Easy
    6. GRE101
    7. High Frequency Words

    ✨ Features

    1. Vocabulary Addition: Add vocabulary lists from vocabulary.com

      • You can add as many vocab lists as you want. Just add the link and the scraper module will scrape the list and save it.
      • Currently 7 lists are added. Details provided in Vocabulary Lists section above.
    2. Learn from lists: Learn words from any of the provided lists

      • An interactive learner is created to memorize the word meanings
      • Store learnt vocabulary in TestedWords.json
    3. Tests: Take tests to memorize the word meanings

      • Supports 4 different types of tests:
        1. MCQ (Learnt Words)
        2. MCQ (Random Words)
        3. Written Test (Learnt Words)
        4. Written Test (Random Words)
      • Also track the time taken to complete the tests.
    4. Word Search: Search for any word in the vocabulary

      • The vocabulary consists of all the words in all the lists.
    5. Stats: Display the statistics of your performance

      • You can look at your Streak Calendar which shows the dates when you practiced.
      • Maintain streaks
      • Get detailed analysis of the score and time taken for every test and compare your performance

    🪜 Folder Structure

    📦 GRE-Prep-Tool
    ├── 📝 GREWordList.json       # Contains the list of words categorized by their list names
    ├── 📝 TestedWords.json       # Contains the list of words that you have learnt
    ├── 📝 TestScores.csv         # Contains the test scores
    ├── 📝 requirements.txt       # Contains the requirements needed for running this project
    ├── 📝 Stats.txt              # Contains Streak information
    ├── 📝 vocabulary.json        # Contains all the words in the vocabulary
    └── 📝 main.py                # Driver code for the program

    📍 RoadMap

    • Fix Scraping from Vocabulary.com
    • Add more tests
    • Track time taken for tests
    • Add statistics for tests
    • Add daily streak
    • Clean and refactor code
    • Add feature to save test scores
    • Ability to remove words from TestedWords.json
    • Charts to compare performance
    • Add serial revision
    • Add finding definition for an individual word
    • Create %tile score based on performance
    • Universities available with the score range

    🔗 Links

    Twitter · GitHub · LinkedIn

    🍰 Contributing

    Any contributions you make are greatly appreciated.

    If you have a suggestion that would make this better, please fork the repo and create a pull request.

    1. Fork the Project

    2. Commit your Changes

      git commit -m 'Add some Feature'
    3. Push to the Branch

      git push origin main
    4. Open a Pull Request


    Don’t forget to give the project a star! Thanks again!

    🎉 Thanks

    This project is a highly modified and working version of this GitHub project.

    Visit original content creator repository https://github.com/pycoder2000/GRE-Prep-Tool
  • go-parquet-tools

    go-parquet-tools

    Alternative to pypi parquet-tools in Golang.

    You can show the content and schema of parquet file(s) on local disk or on Amazon S3. It is incompatible with the original parquet-tools. go-parquet-tools is faster because it is implemented in Go.

    Install

    go install github.com/ktrueda/go-parquet-tools@latest

    Usage

    go-parquet-tools csv test_resources/test1.parquet
    one,two,three
    -1,foo,true
    <nil>,bar,false
    2.5,baz,true

    go-parquet-tools show --nil None "test_resources/*"
    +------+-----+-------+
    | one  | two | three |
    +------+-----+-------+
    | -1   | foo | true  |
    | None | bar | false |
    | 2.5  | baz | true  |
    | -1   | foo | true  |
    | None | bar | false |
    | 2.5  | baz | true  |
    +------+-----+-------+

    go-parquet-tools show s3://foo/test1.parquet
    Downloaded s3://foo/test.parquet to /var/folders/f3/9l_qwscs3z94m3yw255bw4l40000gn/T/9ed16365-58e2-40f2-a492-e8477b418a0f.parquet .
    +-------+-----+-------+
    | one   | two | three |
    +-------+-----+-------+
    | -1    | foo | true  |
    | <nil> | bar | false |
    | 2.5   | baz | true  |
    +-------+-----+-------+
    go-parquet-tools inspect test_resources/test1.parquet
    inspect output

    Version:  1
    Schema:
            ######### schema #########
            Type:  <nil>
            TypeLength:  <nil>
            RepetitionType:  REQUIRED
            Name:  schema
            NumChildren:  0xc0000288d8
            ConvertedType:  <nil>
            Scale:  <nil>
            Precision:  <nil>
            FieldID:  <nil>
            LogicalType:  <nil>
            ######### one #########
            Type:  DOUBLE
            TypeLength:  <nil>
            RepetitionType:  OPTIONAL
            Name:  one
            NumChildren:  <nil>
            ConvertedType:  <nil>
            Scale:  <nil>
            Precision:  <nil>
            FieldID:  <nil>
            LogicalType:  <nil>
            ######### two #########
            Type:  BYTE_ARRAY
            TypeLength:  <nil>
            RepetitionType:  OPTIONAL
            Name:  two
            NumChildren:  <nil>
            ConvertedType:  UTF8
            Scale:  <nil>
            Precision:  <nil>
            FieldID:  <nil>
            LogicalType:  LogicalType({STRING:StringType({}) MAP:<nil> LIST:<nil> ENUM:<nil> DECIMAL:<nil> DATE:<nil> TIME:<nil> TIMESTAMP:<nil> INTEGER:<nil> UNKNOWN:<nil> JSON:<nil> BSON:<nil> UUID:<nil>})
            ######### three #########
            Type:  BOOLEAN
            TypeLength:  <nil>
            RepetitionType:  OPTIONAL
            Name:  three
            NumChildren:  <nil>
            ConvertedType:  <nil>
            Scale:  <nil>
            Precision:  <nil>
            FieldID:  <nil>
            LogicalType:  <nil>
    NumRows:  3
    RowGroups:
            Columns:
                    #########
                    FilePath  <nil>
                    FileOffset  108
                    MetaData.Type  DOUBLE
                    MetaData.Encodings  [PLAIN_DICTIONARY PLAIN RLE]
                    MetaData.PathInSchema  [one]
                    MetaData.Codec  SNAPPY
                    MetaData.NumValues  3
                    MetaData.TotalUncompressedSize  100
                    MetaData.TotalCompressedSize  104
                    MetaData.KeyValueMetadata  []
                    MetaData.DataPageOffset  36
                    MetaData.IndexPageOffset  <nil>
                    MetaData.DictionaryPageOffset  0xc000028930
                    MetaData.Statistics  Statistics({Max:[0 0 0 0 0 0 4 64] Min:[0 0 0 0 0 0 240 191] NullCount:0xc000028938 DistinctCount:<nil> MaxValue:[0 0 0 0 0 0 4 64] MinValue:[0 0 0 0 0 0 240 191]})
                    MetaData.EncodingStats  [PageEncodingStats({PageType:DICTIONARY_PAGE Encoding:PLAIN_DICTIONARY Count:1}) PageEncodingStats({PageType:DATA_PAGE Encoding:PLAIN_DICTIONARY Count:1})]
                    MetaData.BloomFilterOffset  <nil>
                    OffsetIndexOffset  <nil>
                    OffsetIndexLength  <nil>
                    ColumnIndexOffset  <nil>
                    ColumnIndexLength  <nil>
                    CryptoMeatadata  <nil>
                    EncryptedColumnMetadata  []
                    #########
                    FilePath  <nil>
                    FileOffset  281
                    MetaData.Type  BYTE_ARRAY
                    MetaData.Encodings  [PLAIN_DICTIONARY PLAIN RLE]
                    MetaData.PathInSchema  [two]
                    MetaData.Codec  SNAPPY
                    MetaData.NumValues  3
                    MetaData.TotalUncompressedSize  76
                    MetaData.TotalCompressedSize  80
                    MetaData.KeyValueMetadata  []
                    MetaData.DataPageOffset  238
                    MetaData.IndexPageOffset  <nil>
                    MetaData.DictionaryPageOffset  0xc000028948
                    MetaData.Statistics  Statistics({Max:[] Min:[] NullCount:0xc000028950 DistinctCount:<nil> MaxValue:[102 111 111] MinValue:[98 97 114]})
                    MetaData.EncodingStats  [PageEncodingStats({PageType:DICTIONARY_PAGE Encoding:PLAIN_DICTIONARY Count:1}) PageEncodingStats({PageType:DATA_PAGE Encoding:PLAIN_DICTIONARY Count:1})]
                    MetaData.BloomFilterOffset  <nil>
                    OffsetIndexOffset  <nil>
                    OffsetIndexLength  <nil>
                    ColumnIndexOffset  <nil>
                    ColumnIndexLength  <nil>
                    CryptoMeatadata  <nil>
                    EncryptedColumnMetadata  []
                    #########
                    FilePath  <nil>
                    FileOffset  388
                    MetaData.Type  BOOLEAN
                    MetaData.Encodings  [PLAIN RLE]
                    MetaData.PathInSchema  [three]
                    MetaData.Codec  SNAPPY
                    MetaData.NumValues  3
                    MetaData.TotalUncompressedSize  40
                    MetaData.TotalCompressedSize  42
                    MetaData.KeyValueMetadata  []
                    MetaData.DataPageOffset  346
                    MetaData.IndexPageOffset  <nil>
                    MetaData.DictionaryPageOffset  <nil>
                    MetaData.Statistics  Statistics({Max:[1] Min:[0] NullCount:0xc000028970 DistinctCount:<nil> MaxValue:[1] MinValue:[0]})
                    MetaData.EncodingStats  [PageEncodingStats({PageType:DATA_PAGE Encoding:PLAIN Count:1})]
                    MetaData.BloomFilterOffset  <nil>
                    OffsetIndexOffset  <nil>
                    OffsetIndexLength  <nil>
                    ColumnIndexOffset  <nil>
                    ColumnIndexLength  <nil>
                    CryptoMeatadata  <nil>
                    EncryptedColumnMetadata  []
            TotalByteSize:  226
            NumRows:  3
            SotringColumns:  []
            FileOffset:  0xc000028978
            TotalCompressedSize:  0xc000028980
            Ordinal:  0xc000028988
    KeyValueMetaData:  [KeyValue({Key:pandas Value:0xc000063330}) KeyValue({Key:ARROW:schema Value:0xc000063340})]
    CreatedBy:  0xc000063350
    ColumnOrders:  [ColumnOrder({TYPE_ORDER:TypeDefinedOrder({})}) ColumnOrder({TYPE_ORDER:TypeDefinedOrder({})}) ColumnOrder({TYPE_ORDER:TypeDefinedOrder({})})]
    EncryptionAlgorithm:  <nil>
    FooterSigningKeyMetadata:  []

    Benchmark result

    go-parquet-tools is 100x faster than pypi parquet-tools.

    | Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
    |---|---|---|---|---|
    | parquet-tools csv test_resources/test1.parquet | 702.8 ± 19.9 | 676.2 | 739.4 | 1.00 |
    | go-parquet-tools csv test_resources/test1.parquet | 6.6 ± 0.4 | 6.2 | 7.3 | 1.00 |

    https://github.com/sharkdp/hyperfine
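
    For reference, a table like the one above can be produced with hyperfine (linked above); assuming both tools are installed and on the PATH, a command along these lines reproduces the comparison:

    hyperfine 'parquet-tools csv test_resources/test1.parquet' \
        'go-parquet-tools csv test_resources/test1.parquet'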

    Visit original content creator repository https://github.com/ktrueda/go-parquet-tools

  • star-join-spark

    Spark Star Join

    You will find in this repository the implementation of two efficient solutions for star joins using the Spark framework, dropping the computation time by at least 60% when compared to other solutions available: the Spark Bloom-Filtered Cascade Join (SBFCJ) and the Spark Broadcast Join (SBJ). Each of these strategies excels in a different scenario: SBJ is twice as fast when the memory available to each executor is large enough, while SBFCJ is remarkably resilient to low-memory scenarios. As of now, these algorithms are very competitive, and may easily be combined with other technologies for further improvement (such as new data types or file management).

    You will also find a direct Spark implementation of a plain sequence of joins, which delivers very poor performance and is far from being a viable solution. This shows the importance of the additional filtering.
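
    To make the role of the filtering concrete, below is a minimal, illustrative Python sketch of the Bloom-filtered cascade idea (my own sketch, not the repository's Spark code): build one Bloom filter over the join keys of each locally filtered dimension table, prune the fact table with all filters, and only then perform the joins. All table contents here are made up.

    import hashlib

    class BloomFilter:
        # tiny Bloom filter with k hash functions derived from md5
        def __init__(self, m=1024, k=3):
            self.m, self.k, self.bits = m, k, bytearray(m)

        def _positions(self, key):
            for i in range(self.k):
                digest = hashlib.md5(f"{i}:{key}".encode()).hexdigest()
                yield int(digest, 16) % self.m

        def add(self, key):
            for pos in self._positions(key):
                self.bits[pos] = 1

        def might_contain(self, key):
            return all(self.bits[pos] for pos in self._positions(key))

    # dimension tables after applying their local predicates (made-up data)
    dim_date = {1: "2016-01-01", 2: "2016-01-02"}   # key -> attribute
    dim_part = {10: "bolt", 11: "nut"}

    # one Bloom filter per dimension, built over the surviving keys
    filters = []
    for dim in (dim_date, dim_part):
        bf = BloomFilter()
        for key in dim:
            bf.add(key)
        filters.append(bf)

    # fact rows: (date_key, part_key, measure)
    fact = [(1, 10, 5.0), (1, 99, 2.0), (3, 11, 7.0), (2, 11, 1.5)]

    # the cascade: a fact row survives only if every filter might contain its key,
    # so most non-matching rows are dropped before any join is performed
    pruned = [row for row in fact
              if filters[0].might_contain(row[0]) and filters[1].might_contain(row[1])]

    # the final, much smaller joins also resolve possible false positives
    result = [(dim_date[d], dim_part[p], m) for d, p, m in pruned
              if d in dim_date and p in dim_part]
    print(result)   # [('2016-01-01', 'bolt', 5.0), ('2016-01-02', 'nut', 1.5)]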

    These strategies were presented and studied in our recent paper, Brito et al., “Faster Cloud Star Joins with Reduced Disk Spill and Network Communication”. If you find it useful for your own research/applications, please cite our work and/or star our repository. If you need more information or have suggestions, feel free to either contact me or make a PR. Feedback is very important.

    Relevant papers

    If you find this useful, please star this repository and/or cite our paper:

    Jaqueline Brito, Thiago Mosqueiro, Ricardo R Ciferri and Cristina DA Ciferri. Faster Cloud Star Joins with Reduced Disk Spill and Network Communication. Chemometrics and Intelligent Laboratory Systems 2016.

    Acknowledgements

    We acknowledge Microsoft Azure Research grant MS-AZR-0036P, FAPESP grant 2012/13158-9 and CNPq grant 234817/2014-3.

    License

    Feel free to use this code for studying, applying to your own problems, or anything that complies with the MIT License (MIT), available in the folder License. If you use this code, we kindly ask that you cite our paper Brito et al, where both of these strategies were presented and studied.

    Visit original content creator repository https://github.com/jaquejbrito/star-join-spark

  • pmi

    I have been fumbling around this for a while now, say a couple of years off and
    on. I have played with different ideas and approaches. Read some stuff, read
    some other stuff, re-read the original stuff. Talked with people about how it
    works. Doodled endlessly and wrote pages of notes. All to discover that it was
    right in front of me the whole time and it really is fairly straightforward.

    P-M interaction diagrams determine the capacity envelope of a reinforced concrete member under a combination of axial force and moment applied at a section of the member.

    The maximum usable concrete strain is given from experimentation as 0.003. Then it is a matter of iterating over a range of curvature values or neutral-axis locations; either works because the two are related by a single equation. The strain in the steel is determined based on its distance from the neutral axis. The stress in the steel is then calculated based on the strain in the steel and the modulus of elasticity. Alternatively, a value taken directly from the stress-strain diagram could be used. Depending on the material model, this could be elastic-perfectly-plastic or a more exact model. Finally, the forces and moments on the cross-section are summed. For each iteration, the corresponding P-M pair is added to the array.

    The one thing I am still unsure about is how a cracked-section analysis plays into the development of P-M interaction diagrams. The answer to this is that the cracked section is taken care of by the stress-strain diagram of the concrete. Instead of using a Whitney stress block, one can find the actual strain at discrete points, calculate the stress based on the strain and the stress-strain diagram, and multiply the stress by the area each discrete point represents to get a force for use in the force and moment equilibrium; the sketch below illustrates this.
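
    As a concrete illustration of that procedure, here is a minimal Python sketch (mine, not the repository's code) that sweeps the neutral-axis depth, recovers fiber strains from the linear strain profile, converts them to stresses through assumed material models, and sums forces and moments into P-M pairs. The section geometry, reinforcement layout and material curves are all assumed values.

    b, h = 12.0, 24.0                  # section width and depth (in)
    fc, fy, Es = 4.0, 60.0, 29000.0    # concrete strength, steel yield, steel modulus (ksi)
    eps_cu = 0.003                     # maximum usable concrete strain
    bars = [(2.5, 2.0), (21.5, 2.0)]   # (depth from top, steel area) per layer

    def concrete_stress(eps):
        # simple parabolic curve in compression; zero in tension (the cracked section)
        if eps <= 0.0:
            return 0.0
        e0 = 0.002
        return fc * (2 * eps / e0 - (eps / e0) ** 2) if eps < e0 else fc

    def steel_stress(eps):
        # elastic-perfectly-plastic steel
        return max(-fy, min(fy, Es * eps))

    def pm_point(c, n_fibers=200):
        # P and M for a neutral-axis depth c, with strain eps_cu at the top fiber
        P, M, dy = 0.0, 0.0, h / n_fibers
        for i in range(n_fibers):              # concrete fibers
            y = (i + 0.5) * dy                 # depth of fiber from the top
            eps = eps_cu * (c - y) / c         # linear strain profile
            f = concrete_stress(eps) * b * dy  # stress times the fiber's area
            P, M = P + f, M + f * (h / 2 - y)  # moment taken about mid-depth
        for d, As in bars:                     # steel layers
            f = steel_stress(eps_cu * (c - d) / c) * As
            P, M = P + f, M + f * (h / 2 - d)
        return P, M

    # sweep neutral-axis depths to trace the P-M envelope
    envelope = [pm_point(0.05 * h * t) for t in range(1, 41)]
    for P, M in envelope[::10]:
        print(f"P = {P:8.1f} kip, M = {M:8.1f} kip-in")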

    Visit original content creator repository https://github.com/mwhit74/pmi

  • MinerLamp

    MinerLamp


    MinerLamp is a graphical interface for ethminer, with advanced watchdog options and monitoring.

    MinerLamp, written in Qt (C++), is a GUI with options to run ethminer under control. It can be built on Linux, Mac and Windows.

    Features

    • simple GUI that can run on Windows, Linux and Mac (Qt).
    • automated ethminer parameters.
    • watchdog for restarting ethminer in case of errors.
    • enhanced display of ethminer output – highlighted successful solutions, shares & zero hashrate
    • display of useful information for nVidia cards – core temperatures, fan speed, memory and gpu clock, power draw – as a minimum and a maximum over all cards – easy for spotting OC issues.
    • fun options like blinking the cards’ LEDs on accepted shares.
    • integrated overclocking of nVidia cards!
    • integrated pools monitoring – in progress …


    Download

    You can find the ready to use package in Releases.

    Install

    MinerLamp uses Qt as its main development framework. The binary (MinerLamp.exe) depends on several Qt libraries. You can find them in the archive available on the release page [https://github.com/orkblutt/MinerLamp/releases]. The nVidia option needs nvml.dll (or .so). On Windows you can copy and paste it into your MinerLamp directory or add its path to the PATH environment variable. Traditionally, the path is: “C:\Program Files\NVIDIA Corporation\NVSMI”

    Usage

    1 – Start MinerLamp.

    2 – Fill the ethminer binary path in the first edit box, e.g.:

    C:\Users\USERNAME\Desktop\ethminer\ethminer.exe

    3 – Fill in ethminer’s startup parameters, as you would normally use in your ethminer startup script, but excluding the leading “ethminer.exe”, e.g.:

    --farm-recheck 2000 -U -S POOL_ADDRESS:PORT -O WALLET_ID.MINER_NAME/E-MAIL_FOR_MONITORING

    4 – Check the options you want to activate.

    5 – Click on “Start mining”.

    All the parameters you set are automatically saved and MinerLamp will use them on the next run.

    Please note, “Auto mining” isn’t automatic startup of the program! We suggest setting minerlamp.exe to run on startup: head to Users/USERNAME/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup and paste a shortcut there for autostarting.

    Build

    You need Qt installed (5.9 was used, but all 5.x versions are compatible). Open the .pro file with Qt Creator. Click on “configure” with the appropriate configuration. Then you can build (execute qmake and compile). If you don’t want to build for nVidia GPUs, delete the NVIDIA flag from the DEFINES variable in the .pro file.

    Contribute

    Feel free to make pull requests. You can also chat with other users and me on Gitter.

    Donation

    By default, the binaries built from this code include an automatic donation system: every 4 hours, ethminer is restarted using the 0xa07a8c9975145bb5371e8b3c31acb62ad9d0698e ethereum address in the parameters for 5 minutes. When this 5-minute period is over, ethminer is restarted using the user’s credentials. If you prefer not to use this system, uncheck the donation checkbox in the About dialog.

    You can also donate directly on 0xa07a8c9975145bb5371e8b3c31acb62ad9d0698e 😉

    TODO

    • CMake
    • Support for AMD
    • build Linux and Mac releases
    • monitoring for various pools
    Visit original content creator repository https://github.com/orkblutt/MinerLamp
  • ai4chem

    ********Still under development********

    ai4chem

    Deep Learning for Chem

    Create and Train Tokenizer

    
        from pathlib import Path

        from tokenizers import Tokenizer, decoders, pre_tokenizers, trainers
        from tokenizers.models import BPE

        # MoleculePretokenizer is defined in this project
        data_path = 'data/drug_token/'
        dest_path = 'data/models/'
        model_name = 'covid-tokenizer'

        paths = [str(x) for x in Path(data_path).glob("**/*.txt")]

        tokenizer = Tokenizer(BPE())
        tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(MoleculePretokenizer())
        tokenizer.decoder = decoders.Decoder.custom(MoleculePretokenizer())

        trainer = trainers.BpeTrainer(special_tokens=["<mask>", "<pad>"])
        tokenizer.train(trainer, paths)

        # And now it is ready, we can save the vocabulary with
        tokenizer.model.save(dest_path, model_name)
        
    
    Create and Train Tokenizer (ChemByteLevelBPETokenizer)

        # ChemByteLevelBPETokenizer is defined in this project
        data_path = "data/drug_token/"
        dest_path = 'data/models/'
        model_name = 'covid-tokenizer'

        paths = [str(x) for x in Path(data_path).glob("**/*.txt")]
        tokenizer = ChemByteLevelBPETokenizer()
        tokenizer.train(paths)  # trains directly on the files ('trainer' was undefined here)
        tokenizer.save(dest_path, model_name)
    
    
    Use Tokenizer

        import logging

        from rdkit import Chem

        # ChemByteLevelBPETokenizer is defined in this project
        logger = logging.getLogger(__name__)
        merges = "data/models/covid-tokenizer-merges.txt"
        vocab = "data/models/covid-tokenizer-vocab.json"
        tokenizer = ChemByteLevelBPETokenizer(vocab, merges)
        tokenizer.add_special_tokens(["<pad>", "<mask>"])
        tokenizer.enable_truncation(max_length=120)
        tokenizer.enable_padding(max_length=120, pad_token='<pad>')
        tokenizer.decode(tokenizer.encode('c1ccccc1OCCOC(=O)CC').ids)

        # check that every molecule in the SDF file round-trips through the tokenizer
        suppl = Chem.SDMolSupplier('../data/active.sdf')
        for mol in suppl:
            smi = Chem.MolToSmiles(mol)
            decoded_smi = tokenizer.decode(tokenizer.encode(smi).ids)
            if decoded_smi == smi:
                logger.info('correct')
            else:
                logger.info('not correct')
    

    Affinity Prediction

    ```
    import os
    from itertools import islice

    import matplotlib.pyplot as plt
    import pandas as pd
    import tensorflow as tf
    import tensorflow_docs as tfdocs
    import tensorflow_docs.modeling
    import tensorflow_docs.plots
    from transformers import BertConfig

    # ChemByteLevelBPETokenizer and TFBertForAffinityPrediction are defined in this project

    max_seq_length = 512

    merges = "data/models/covid-tokenizer-merges.txt"
    vocab = "data/models/covid-tokenizer-vocab.json"
    tokenizer = ChemByteLevelBPETokenizer(vocab, merges)
    tokenizer.add_special_tokens(["<pad>", "<mask>"])
    tokenizer.enable_truncation(max_length=120)
    config = BertConfig.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')

    affinityPredictor = TFBertForAffinityPrediction(config)

    dataset_file = "desc_canvas_aug30.csv"
    dataset = pd.read_csv(os.path.join('../data', dataset_file))
    train_dataset = dataset.sample(frac=0.8, random_state=0)
    test_dataset = dataset.drop(train_dataset.index)

    num_test_batch = 12

    optimizer = tf.keras.optimizers.RMSprop(0.001)

    affinityPredictor.compile(loss='mse', optimizer=optimizer,
                              metrics=['mae', 'mse'])

    # build a small batch of (molecule, molecule) pairs and their pIC50 labels
    train = [[i['mol'][:511], i['mol'][:511]] for _, i in islice(train_dataset.iterrows(), num_test_batch)]
    labels = [i['pIC50'] for _, i in islice(train_dataset.iterrows(), num_test_batch)]

    # return TF tensors for the TF model (was "pt")
    train = tokenizer.batch_encode_plus(train, return_tensors="tf", add_special_tokens=True,
                                        pad_to_max_length=True)["input_ids"]
    history = affinityPredictor.fit(tf.convert_to_tensor(train), tf.convert_to_tensor(labels),
                                    epochs=200, verbose=0, validation_split=0.2,
                                    callbacks=[tfdocs.modeling.EpochDots(report_every=2)])
    print(affinityPredictor.predict(tf.convert_to_tensor(train)))

    plotter = tfdocs.plots.HistoryPlotter(smoothing_std=2)
    plotter.plot({'Basic': history}, metric="mae")
    plt.ylabel('MAE [pIC50]')

    plotter.plot({'Basic': history}, metric="mse")
    plt.ylabel('MSE [pIC50^2]')
    ```
    

    Visit original content creator repository https://github.com/ai4u-ai/ai4chem