
Commit ba14dca

Add format file
AntonReinhard committed Sep 2, 2024
1 parent fda6cc5 commit ba14dca
Showing 37 changed files with 214 additions and 255 deletions.
2 changes: 2 additions & 0 deletions .JuliaFormatter.toml
@@ -0,0 +1,2 @@
+# See https://domluna.github.io/JuliaFormatter.jl/stable/ for a list of options
+style = "blue"
2 changes: 1 addition & 1 deletion .formatting/format_all.jl
@@ -3,7 +3,7 @@ using JuliaFormatter
 # we asume the format_all.jl script is located in .formatting
 project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..")
 
-not_formatted = format(project_path; verbose = true)
+not_formatted = format(project_path; verbose=true)
 if not_formatted
     @info "Formatting verified."
 else
2 changes: 1 addition & 1 deletion .github/workflows/BuildDeployDoc.yml
@@ -13,7 +13,7 @@ jobs:
     permissions:
       contents: write
       statuses: write
-    runs-on: self-hosted
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
       - uses: julia-actions/setup-julia@v1
2 changes: 1 addition & 1 deletion .github/workflows/CompatHelper.yml
@@ -9,7 +9,7 @@ permissions:
 jobs:
   CompatHelper:
     if: github.repository == 'GraphComputing-jl/GraphComputing.jl'
-    runs-on: self-hosted
+    runs-on: ubuntu-latest
     steps:
       - name: Check if Julia is already available in the PATH
        id: julia_in_path
2 changes: 1 addition & 1 deletion .github/workflows/formatter.yaml
@@ -2,7 +2,7 @@ name: formatter
 on: [pull_request]
 jobs:
   formatter:
-    runs-on: self-hosted
+    runs-on: ubuntu-latest
     steps:
       - name: checkout repo
         uses: actions/checkout@v4
2 changes: 1 addition & 1 deletion .github/workflows/unit_tests.yml
@@ -8,7 +8,7 @@ env:
 
 jobs:
   test:
-    runs-on: self-hosted
+    runs-on: ubuntu-latest
 
     steps:
      - name: Checkout repository
28 changes: 14 additions & 14 deletions docs/make.jl
@@ -1,7 +1,7 @@
 using Pkg
 
 project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..")
-Pkg.develop(; path = project_path)
+Pkg.develop(; path=project_path)
 
 using Documenter
 using GraphComputing
@@ -25,20 +25,20 @@ pages = [
 ]
 
 makedocs(;
-    modules = [GraphComputing],
-    checkdocs = :exports,
-    authors = "Anton Reinhard",
-    repo = Documenter.Remotes.GitHub("GraphComputing-jl", "GraphComputing.jl"),
-    sitename = "GraphComputing.jl",
-    format = Documenter.HTML(;
-        prettyurls = get(ENV, "CI", "false") == "true",
-        canonical = "https://graphcomputing.gitlab.io/GraphComputing.jl",
-        assets = String[],
+    modules=[GraphComputing],
+    checkdocs=:exports,
+    authors="Anton Reinhard",
+    repo=Documenter.Remotes.GitHub("GraphComputing-jl", "GraphComputing.jl"),
+    sitename="GraphComputing.jl",
+    format=Documenter.HTML(;
+        prettyurls=get(ENV, "CI", "false") == "true",
+        canonical="https://graphcomputing.gitlab.io/GraphComputing.jl",
+        assets=String[],
     ),
-    pages = pages,
+    pages=pages,
 )
 deploydocs(;
-    repo = "github.com/GraphComputing-jl/GraphComputing.jl.git",
-    push_preview = false,
-    devbranch = "main",
+    repo="github.com/GraphComputing-jl/GraphComputing.jl.git",
+    push_preview=false,
+    devbranch="main",
 )
54 changes: 28 additions & 26 deletions src/code_gen/function.jl
@@ -13,8 +13,8 @@ function get_compute_function(
     graph::DAG,
     instance,
     machine::Machine,
-    cache_module::Module = @__MODULE__,
-    context_module::Module = @__MODULE__
+    cache_module::Module=@__MODULE__,
+    context_module::Module=@__MODULE__
 )
     tape = gen_tape(graph, instance, machine, cache_module, context_module)
 
@@ -25,15 +25,15 @@ function get_compute_function(
     functionId = to_var_name(UUIDs.uuid1(rng[1]))
     resSym = eval(gen_access_expr(entry_device(tape.machine), tape.outputSymbol))
     expr = #
     Expr(
-        :function, # function definition
-        Expr(
+        :function, # function definition
+        Expr(
-            :call,
-            Symbol("compute_$functionId"),
-            Expr(:(::), :data_input, input_type(instance)),
-        ), # function name and parameters
-        Expr(:block, initCaches, assignInputs, code, Expr(:return, resSym)), # function body
-    )
+            :call,
+            Symbol("compute_$functionId"),
+            Expr(:(::), :data_input, input_type(instance)),
+        ), # function name and parameters
+        Expr(:block, initCaches, assignInputs, code, Expr(:return, resSym)), # function body
+    )
 
     return RuntimeGeneratedFunction(cache_module, context_module, expr)
 end
@@ -51,8 +51,8 @@ function get_cuda_kernel(
     graph::DAG,
     instance,
     machine::Machine,
-    cache_module::Module = @__MODULE__,
-    context_module::Module = @__MODULE__
+    cache_module::Module=@__MODULE__,
+    context_module::Module=@__MODULE__
 )
     tape = gen_tape(graph, instance, machine, cache_module, context_module)
 
@@ -62,18 +62,20 @@ function get_cuda_kernel(
 
     functionId = to_var_name(UUIDs.uuid1(rng[1]))
     resSym = eval(gen_access_expr(entry_device(tape.machine), tape.outputSymbol))
-    expr = Meta.parse("function compute_$(functionId)(input_vector, output_vector, n::Int64)
-        id = (blockIdx().x - 1) * blockDim().x + threadIdx().x
-        if (id > n)
-            return
-        end
-        @inline data_input = input_vector[id]
-        $(initCaches)
-        $(assignInputs)
-        $code
-        @inline output_vector[id] = $resSym
-        return nothing
-        end")
+    expr = Meta.parse(
+        "function compute_$(functionId)(input_vector, output_vector, n::Int64)
+            id = (blockIdx().x - 1) * blockDim().x + threadIdx().x
+            if (id > n)
+                return
+            end
+            @inline data_input = input_vector[id]
+            $(initCaches)
+            $(assignInputs)
+            $code
+            @inline output_vector[id] = $resSym
+            return nothing
+        end"
+    )
@@ -101,8 +103,8 @@ function execute(
     instance,
     machine::Machine,
     input,
-    cache_module::Module = @__MODULE__,
-    context_module::Module = @__MODULE__
+    cache_module::Module=@__MODULE__,
+    context_module::Module=@__MODULE__
 )
     tape = gen_tape(graph, instance, machine, cache_module, context_module)
     return execute_tape(tape, input)
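For orientation, the three entry points touched above fit together roughly like this — a hypothetical usage sketch assembled from the signatures visible in this diff (the `graph`, `instance`, and `input` values are assumed to come from the surrounding package):

```julia
using GraphComputing

machine = get_machine_info()  # device detection, see src/devices/detect.jl below

# One-shot: generates the tape internally and executes it directly.
result = execute(graph, instance, machine, input)

# Or build a callable once and reuse it; get_compute_function returns a
# RuntimeGeneratedFunction that is invoked like any ordinary function.
compute! = get_compute_function(graph, instance, machine)
result = compute!(input)
```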
49 changes: 19 additions & 30 deletions src/code_gen/tape_machine.jl
@@ -1,40 +1,38 @@
 # TODO: do this with macros
 function call_fc(
-    fc::FunctionCall{VectorT,0},
-    cache::Dict{Symbol,Any},
+    fc::FunctionCall{VectorT,0}, cache::Dict{Symbol,Any}
 ) where {VectorT<:SVector{1}}
     cache[fc.return_symbol] = fc.func(cache[fc.arguments[1]])
     return nothing
 end
 
 function call_fc(
-    fc::FunctionCall{VectorT,1},
-    cache::Dict{Symbol,Any},
+    fc::FunctionCall{VectorT,1}, cache::Dict{Symbol,Any}
 ) where {VectorT<:SVector{1}}
     cache[fc.return_symbol] = fc.func(fc.value_arguments[1], cache[fc.arguments[1]])
     return nothing
 end
 
 function call_fc(
-    fc::FunctionCall{VectorT,0},
-    cache::Dict{Symbol,Any},
+    fc::FunctionCall{VectorT,0}, cache::Dict{Symbol,Any}
 ) where {VectorT<:SVector{2}}
     cache[fc.return_symbol] = fc.func(cache[fc.arguments[1]], cache[fc.arguments[2]])
     return nothing
 end
 
 function call_fc(
-    fc::FunctionCall{VectorT,1},
-    cache::Dict{Symbol,Any},
+    fc::FunctionCall{VectorT,1}, cache::Dict{Symbol,Any}
 ) where {VectorT<:SVector{2}}
-    cache[fc.return_symbol] =
-        fc.func(fc.value_arguments[1], cache[fc.arguments[1]], cache[fc.arguments[2]])
+    cache[fc.return_symbol] = fc.func(
+        fc.value_arguments[1], cache[fc.arguments[1]], cache[fc.arguments[2]]
+    )
     return nothing
 end
 
 function call_fc(fc::FunctionCall{VectorT,1}, cache::Dict{Symbol,Any}) where {VectorT}
-    cache[fc.return_symbol] =
-        fc.func(fc.value_arguments[1], getindex.(Ref(cache), fc.arguments)...)
+    cache[fc.return_symbol] = fc.func(
+        fc.value_arguments[1], getindex.(Ref(cache), fc.arguments)...
+    )
     return nothing
 end

@@ -46,14 +44,16 @@ Execute the given [`FunctionCall`](@ref) on the dictionary.
 Several more specialized versions of this function exist to reduce vector unrolling work for common cases.
 """
 function call_fc(fc::FunctionCall{VectorT,M}, cache::Dict{Symbol,Any}) where {VectorT,M}
-    cache[fc.return_symbol] =
-        fc.func(fc.value_arguments..., getindex.(Ref(cache), fc.arguments)...)
+    cache[fc.return_symbol] = fc.func(
+        fc.value_arguments..., getindex.(Ref(cache), fc.arguments)...
+    )
     return nothing
 end
 
 function expr_from_fc(fc::FunctionCall{VectorT,0}) where {VectorT}
-    func_call =
-        Expr(:call, fc.func, eval.(gen_access_expr.(Ref(fc.device), fc.arguments))...)
+    func_call = Expr(
+        :call, fc.func, eval.(gen_access_expr.(Ref(fc.device), fc.arguments))...
+    )
     access_expr = eval(gen_access_expr(fc.device, fc.return_symbol))
 
     return Expr(:(=), access_expr, func_call)
@@ -155,7 +155,7 @@ function gen_tape(
     machine::Machine,
     cache_module::Module,
     context_module::Module,
-    scheduler::AbstractScheduler = GreedyScheduler(),
+    scheduler::AbstractScheduler=GreedyScheduler(),
 )
     schedule = schedule_dag(scheduler, graph, machine)
 
@@ -174,22 +174,11 @@ function gen_tape(
 
     initCaches = gen_cache_init_code(machine)
     assign_inputs = gen_input_assignment_code(
-        inputSyms,
-        instance,
-        machine,
-        cache_module,
-        context_module,
+        inputSyms, instance, machine, cache_module, context_module
     )
 
     return Tape{input_type(instance)}(
-        initCaches,
-        assign_inputs,
-        schedule,
-        inputSyms,
-        outSym,
-        Dict(),
-        instance,
-        machine,
+        initCaches, assign_inputs, schedule, inputSyms, outSym, Dict(), instance, machine
     )
 end

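Worth noting while reading the `call_fc` methods above: parameterizing on `SVector{1}`/`SVector{2}` lets dispatch select hand-unrolled bodies for the common one- and two-argument calls, leaving splatting to the generic fallback. A self-contained sketch of the same pattern (illustrative only, not part of the package):

```julia
using StaticArrays

# Specialized methods for statically known argument counts avoid the
# splatting cost of the generic case on hot paths.
apply(f, args::SVector{1}) = f(args[1])
apply(f, args::SVector{2}) = f(args[1], args[2])
apply(f, args::SVector) = f(args...)  # generic fallback

apply(+, SVector(1, 2))  # dispatches to the unrolled SVector{2} method
```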
2 changes: 1 addition & 1 deletion src/devices/cuda/impl.jl
@@ -31,7 +31,7 @@ end
 Return a Vector of [`CUDAGPU`](@ref)s available on the current machine. If `verbose` is true, print some additional information.
 """
-function get_devices(deviceType::Type{T}; verbose::Bool = false) where {T<:CUDAGPU}
+function get_devices(deviceType::Type{T}; verbose::Bool=false) where {T<:CUDAGPU}
     devices = Vector{AbstractDevice}()
 
     if !CUDA.functional()
4 changes: 2 additions & 2 deletions src/devices/detect.jl
@@ -4,11 +4,11 @@
 Return the [`Machine`](@ref) currently running on. The parameter `verbose` defaults to true when interactive.
 """
-function get_machine_info(; verbose::Bool = Base.is_interactive)
+function get_machine_info(; verbose::Bool=Base.is_interactive)
     devices = Vector{AbstractDevice}()
 
     for device in device_types()
-        devs = get_devices(device, verbose = verbose)
+        devs = get_devices(device; verbose=verbose)
         for dev in devs
             push!(devices, dev)
         end
6 changes: 3 additions & 3 deletions src/devices/measure.jl
@@ -3,9 +3,9 @@
 Measure FLOPS, RAM, cache sizes and what other properties can be extracted for the devices in the given machine.
 """
-function measure_devices!(machine::Machine; verbose::Bool = Base.is_interactive())
+function measure_devices!(machine::Machine; verbose::Bool=Base.is_interactive())
     for device in machine.devices
-        measure_device!(device; verbose = verbose)
+        measure_device!(device; verbose=verbose)
     end
 
     return nothing
@@ -16,7 +16,7 @@ end
 Measure the transfer rates between devices in the machine.
 """
-function measure_transfer_rates!(machine::Machine; verbose::Bool = Base.is_interactive())
+function measure_transfer_rates!(machine::Machine; verbose::Bool=Base.is_interactive())
     # TODO implement
     return nothing
 end
6 changes: 3 additions & 3 deletions src/devices/numa/impl.jl
@@ -33,14 +33,14 @@ end
 Return a Vector of [`NumaNode`](@ref)s available on the current machine. If `verbose` is true, print some additional information.
 """
-function get_devices(deviceType::Type{T}; verbose::Bool = false) where {T<:NumaNode}
+function get_devices(deviceType::Type{T}; verbose::Bool=false) where {T<:NumaNode}
     devices = Vector{AbstractDevice}()
     noNumaNodes = highest_numa_node()
 
     if (verbose)
         println("Found $(noNumaNodes + 1) NUMA nodes")
     end
-    for i = 0:noNumaNodes
+    for i in 0:noNumaNodes
         push!(devices, NumaNode(i, 1, default_strategy(NumaNode), -1, UUIDs.uuid1(rng[1])))
 
@@ -62,7 +62,7 @@ function gen_cache_init_code(device::NumaNode)
     end
 
     return error(
-        "Unimplemented cache strategy \"$(device.cacheStrategy)\" for device \"$(device)\"",
+        "Unimplemented cache strategy \"$(device.cacheStrategy)\" for device \"$(device)\""
     )
2 changes: 1 addition & 1 deletion src/devices/oneapi/impl.jl
@@ -31,7 +31,7 @@ end
 Return a Vector of [`oneAPIGPU`](@ref)s available on the current machine. If `verbose` is true, print some additional information.
 """
-function get_devices(deviceType::Type{T}; verbose::Bool = false) where {T<:oneAPIGPU}
+function get_devices(deviceType::Type{T}; verbose::Bool=false) where {T<:oneAPIGPU}
     devices = Vector{AbstractDevice}()
 
     if !oneAPI.functional()
2 changes: 1 addition & 1 deletion src/devices/rocm/impl.jl
@@ -31,7 +31,7 @@ end
 Return a Vector of [`ROCmGPU`](@ref)s available on the current machine. If `verbose` is true, print some additional information.
 """
-function get_devices(deviceType::Type{T}; verbose::Bool = false) where {T<:ROCmGPU}
+function get_devices(deviceType::Type{T}; verbose::Bool=false) where {T<:ROCmGPU}
     devices = Vector{AbstractDevice}()
 
     if !AMDGPU.functional()
8 changes: 4 additions & 4 deletions src/diff/properties.jl
@@ -6,9 +6,9 @@ The fields are `.addedNodes`, `.addedEdges`, `.removedNodes` and `.removedEdges`
 """
 function length(diff::Diff)
     return (
-        addedNodes = length(diff.addedNodes),
-        removedNodes = length(diff.removedNodes),
-        addedEdges = length(diff.addedEdges),
-        removedEdges = length(diff.removedEdges),
+        addedNodes=length(diff.addedNodes),
+        removedNodes=length(diff.removedNodes),
+        addedEdges=length(diff.addedEdges),
+        removedEdges=length(diff.removedEdges),
     )
 end
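Since this `length(diff::Diff)` returns a NamedTuple rather than an integer, callers can pull out counts by field name — a small hypothetical example:

```julia
counts = length(diff)  # (addedNodes = ..., removedNodes = ..., addedEdges = ..., removedEdges = ...)
println("added $(counts.addedNodes) nodes, removed $(counts.removedNodes)")
```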